1 //===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the AArch64 NEON instruction set.
11 //
12 //===----------------------------------------------------------------------===//
13
14 //===----------------------------------------------------------------------===//
15 // NEON-specific DAG Nodes.
16 //===----------------------------------------------------------------------===//
17
18 // (outs Result), (ins Imm, OpCmode)
19 def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
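// SDTypeProfile<1, 2, ...> declares one result and two operands: the result is
// constrained to be a vector and operand 1 (the immediate encoding) to be i32;
// operand 2 (the OpCmode) is left unconstrained here.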
20
21 def Neon_movi     : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
22
23 def Neon_mvni     : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
24
25 // (outs Result), (ins Imm)
26 def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
27                         [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
28
29 // (outs Result), (ins LHS, RHS, CondCode)
30 def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
31                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
32
33 // (outs Result), (ins LHS, 0/0.0 constant, CondCode)
34 def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
35                  [SDTCisVec<0>,  SDTCisVec<1>]>>;
36
37 // (outs Result), (ins LHS, RHS)
38 def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
39                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
40
41 def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
42                                      SDTCisVT<2, i32>]>;
43 def Neon_sqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
44 def Neon_uqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
45
46 def SDTPERMUTE : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
47                                SDTCisSameAs<0, 2>]>;
48 def Neon_uzp1    : SDNode<"AArch64ISD::NEON_UZP1", SDTPERMUTE>;
49 def Neon_uzp2    : SDNode<"AArch64ISD::NEON_UZP2", SDTPERMUTE>;
50 def Neon_zip1    : SDNode<"AArch64ISD::NEON_ZIP1", SDTPERMUTE>;
51 def Neon_zip2    : SDNode<"AArch64ISD::NEON_ZIP2", SDTPERMUTE>;
52 def Neon_trn1    : SDNode<"AArch64ISD::NEON_TRN1", SDTPERMUTE>;
53 def Neon_trn2    : SDNode<"AArch64ISD::NEON_TRN2", SDTPERMUTE>;
54
55 def SDTVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
56 def Neon_rev64    : SDNode<"AArch64ISD::NEON_REV64", SDTVSHUF>;
57 def Neon_rev32    : SDNode<"AArch64ISD::NEON_REV32", SDTVSHUF>;
58 def Neon_rev16    : SDNode<"AArch64ISD::NEON_REV16", SDTVSHUF>;
59 def Neon_vdup : SDNode<"AArch64ISD::NEON_VDUP", SDTypeProfile<1, 1,
60                        [SDTCisVec<0>]>>;
61 def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
62                            [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
63 def Neon_vextract : SDNode<"AArch64ISD::NEON_VEXTRACT", SDTypeProfile<1, 3,
64                            [SDTCisVec<0>,  SDTCisSameAs<0, 1>,
65                            SDTCisSameAs<0, 2>, SDTCisVT<3, i64>]>>;
66
67 //===----------------------------------------------------------------------===//
68 // Addressing-mode instantiations
69 //===----------------------------------------------------------------------===//
70
71 multiclass ls_64_pats<dag address, dag Base, dag Offset, ValueType Ty> {
72 defm : ls_neutral_pats<LSFP64_LDR, LSFP64_STR, Base,
73                       !foreach(decls.pattern, Offset,
74                                !subst(OFFSET, dword_uimm12, decls.pattern)),
75                       !foreach(decls.pattern, address,
76                                !subst(OFFSET, dword_uimm12,
77                                !subst(ALIGN, min_align8, decls.pattern))),
78                       Ty>;
79 }
80
81 multiclass ls_128_pats<dag address, dag Base, dag Offset, ValueType Ty> {
82 defm : ls_neutral_pats<LSFP128_LDR, LSFP128_STR, Base,
83                        !foreach(decls.pattern, Offset,
84                                 !subst(OFFSET, qword_uimm12, decls.pattern)),
85                        !foreach(decls.pattern, address,
86                                 !subst(OFFSET, qword_uimm12,
87                                 !subst(ALIGN, min_align16, decls.pattern))),
88                       Ty>;
89 }
90
91 multiclass uimm12_neon_pats<dag address, dag Base, dag Offset> {
92   defm : ls_64_pats<address, Base, Offset, v8i8>;
93   defm : ls_64_pats<address, Base, Offset, v4i16>;
94   defm : ls_64_pats<address, Base, Offset, v2i32>;
95   defm : ls_64_pats<address, Base, Offset, v1i64>;
96   defm : ls_64_pats<address, Base, Offset, v2f32>;
97   defm : ls_64_pats<address, Base, Offset, v1f64>;
98
99   defm : ls_128_pats<address, Base, Offset, v16i8>;
100   defm : ls_128_pats<address, Base, Offset, v8i16>;
101   defm : ls_128_pats<address, Base, Offset, v4i32>;
102   defm : ls_128_pats<address, Base, Offset, v2i64>;
103   defm : ls_128_pats<address, Base, Offset, v4f32>;
104   defm : ls_128_pats<address, Base, Offset, v2f64>;
105 }
106
107 defm : uimm12_neon_pats<(A64WrapperSmall
108                           tconstpool:$Hi, tconstpool:$Lo12, ALIGN),
109                         (ADRPxi tconstpool:$Hi), (i64 tconstpool:$Lo12)>;
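// A sketch of the intent of the patterns above: a NEON load from a
// constant-pool address is selected as an ADRPxi of the constant-pool entry's
// page plus an LSFP64_LDR/LSFP128_LDR whose unsigned 12-bit immediate offset
// comes from the low 12 bits (:lo12:) of the address.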
110
111 //===----------------------------------------------------------------------===//
112 // Multiclasses
113 //===----------------------------------------------------------------------===//
114
115 multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size,  bits<5> opcode,
116                                 string asmop, SDPatternOperator opnode8B,
117                                 SDPatternOperator opnode16B,
118                                 bit Commutable = 0> {
119   let isCommutable = Commutable in {
120     def _8B :  NeonI_3VSame<0b0, u, size, opcode,
121                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
122                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
123                [(set (v8i8 VPR64:$Rd),
124                   (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
125                NoItinerary>,
126                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
127
128     def _16B : NeonI_3VSame<0b1, u, size, opcode,
129                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
130                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
131                [(set (v16i8 VPR128:$Rd),
132                   (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
133                NoItinerary>,
134                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
135   }
136
137 }
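// As an illustration, the later "defm ANDvvv : NeonI_3VSame_B_sizes<...>"
// expands into two instructions, ANDvvv_8B and ANDvvv_16B, operating on VPR64
// and VPR128 respectively.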
138
139 multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
140                                   string asmop, SDPatternOperator opnode,
141                                   bit Commutable = 0> {
142   let isCommutable = Commutable in {
143     def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
144               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
145               asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
146               [(set (v4i16 VPR64:$Rd),
147                  (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
148               NoItinerary>,
149               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
150
151     def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
152               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
153               asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
154               [(set (v8i16 VPR128:$Rd),
155                  (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
156               NoItinerary>,
157               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
158
159     def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
160               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
161               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
162               [(set (v2i32 VPR64:$Rd),
163                  (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
164               NoItinerary>,
165               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
166
167     def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
168               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
169               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
170               [(set (v4i32 VPR128:$Rd),
171                  (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
172               NoItinerary>,
173               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
174   }
175 }
176 multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
177                                   string asmop, SDPatternOperator opnode,
178                                   bit Commutable = 0>
179    : NeonI_3VSame_HS_sizes<u, opcode,  asmop, opnode, Commutable> {
180   let isCommutable = Commutable in {
181     def _8B :  NeonI_3VSame<0b0, u, 0b00, opcode,
182                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
183                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
184                [(set (v8i8 VPR64:$Rd),
185                   (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
186                NoItinerary>,
187                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
188
189     def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
190                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
191                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
192                [(set (v16i8 VPR128:$Rd),
193                   (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
194                NoItinerary>,
195                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
196   }
197 }
198
199 multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
200                                    string asmop, SDPatternOperator opnode,
201                                    bit Commutable = 0>
202    : NeonI_3VSame_BHS_sizes<u, opcode,  asmop, opnode, Commutable> {
203   let isCommutable = Commutable in {
204     def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
205               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
206               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
207               [(set (v2i64 VPR128:$Rd),
208                  (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
209               NoItinerary>,
210               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
211   }
212 }
213
214 // Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
215 // but result types can be integer or floating point types.
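// For example, FADD below uses v2f32/v4f32/v2f64 for both operands and results,
// while the floating-point comparisons (FCMEQ, FCMGE, FCMGT) take the same
// operand types but produce v2i32/v4i32/v2i64 mask results.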
216 multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
217                                  string asmop, SDPatternOperator opnode,
218                                  ValueType ResTy2S, ValueType ResTy4S,
219                                  ValueType ResTy2D, bit Commutable = 0> {
220   let isCommutable = Commutable in {
221     def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
222               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
223               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
224               [(set (ResTy2S VPR64:$Rd),
225                  (ResTy2S (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
226               NoItinerary>,
227               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
228
229     def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
230               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
231               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
232               [(set (ResTy4S VPR128:$Rd),
233                  (ResTy4S (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
234               NoItinerary>,
235               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
236
237     def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
238               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
239               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
240               [(set (ResTy2D VPR128:$Rd),
241                  (ResTy2D (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
242               NoItinerary>,
243               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
244   }
245 }
246
247 //===----------------------------------------------------------------------===//
248 // Instruction Definitions
249 //===----------------------------------------------------------------------===//
250
251 // Vector Arithmetic Instructions
252
253 // Vector Add (Integer and Floating-Point)
254
255 defm ADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
256 defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd,
257                                      v2f32, v4f32, v2f64, 1>;
258
259 // Patterns to match add of v1i8/v1i16/v1i32 types
260 def : Pat<(v1i8 (add FPR8:$Rn, FPR8:$Rm)),
261           (EXTRACT_SUBREG
262               (ADDvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
263                          (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
264               sub_8)>;
265 def : Pat<(v1i16 (add FPR16:$Rn, FPR16:$Rm)),
266           (EXTRACT_SUBREG
267               (ADDvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
268                          (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
269               sub_16)>;
270 def : Pat<(v1i32 (add FPR32:$Rn, FPR32:$Rm)),
271           (EXTRACT_SUBREG
272               (ADDvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
273                          (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
274               sub_32)>;
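// These patterns place the FPR8/FPR16/FPR32 operands into the low part of a
// 64-bit vector register with SUBREG_TO_REG, perform the .8b/.4h/.2s vector
// add, and extract the scalar result back out with EXTRACT_SUBREG.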
275
276 // Vector Sub (Integer and Floating-Point)
277
278 defm SUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
279 defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub,
280                                      v2f32, v4f32, v2f64, 0>;
281
282 // Patterns to match sub of v1i8/v1i16/v1i32 types
283 def : Pat<(v1i8 (sub FPR8:$Rn, FPR8:$Rm)),
284           (EXTRACT_SUBREG
285               (SUBvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
286                          (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
287               sub_8)>;
288 def : Pat<(v1i16 (sub FPR16:$Rn, FPR16:$Rm)),
289           (EXTRACT_SUBREG
290               (SUBvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
291                          (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
292               sub_16)>;
293 def : Pat<(v1i32 (sub FPR32:$Rn, FPR32:$Rm)),
294           (EXTRACT_SUBREG
295               (SUBvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
296                          (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
297               sub_32)>;
298
299 // Vector Multiply (Integer and Floating-Point)
300
301 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
302 defm MULvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
303 defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul,
304                                      v2f32, v4f32, v2f64, 1>;
305 }
306
307 // Patterns to match mul of v1i8/v1i16/v1i32 types
308 def : Pat<(v1i8 (mul FPR8:$Rn, FPR8:$Rm)),
309           (EXTRACT_SUBREG 
310               (MULvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
311                          (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
312               sub_8)>;
313 def : Pat<(v1i16 (mul FPR16:$Rn, FPR16:$Rm)),
314           (EXTRACT_SUBREG 
315               (MULvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
316                          (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
317               sub_16)>;
318 def : Pat<(v1i32 (mul FPR32:$Rn, FPR32:$Rm)),
319           (EXTRACT_SUBREG
320               (MULvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
321                          (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
322               sub_32)>;
323
324 // Vector Multiply (Polynomial)
325
326 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
327 defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
328                                     int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
329 }
330
331 // Vector Multiply-accumulate and Multiply-subtract (Integer)
332
333 // class NeonI_3VSame_Constraint_impl: NeonI_3VSame with the lane type passed
334 // as a string and with a tied accumulator operand constraint ($src = $Rd).
335 class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
336   RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size,
337   bits<5> opcode, SDPatternOperator opnode>
338   : NeonI_3VSame<q, u, size, opcode,
339     (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
340     asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
341     [(set (OpTy VPRC:$Rd),
342        (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
343     NoItinerary>,
344     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
345   let Constraints = "$src = $Rd";
346 }
347
348 def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
349                        (add node:$Ra, (mul node:$Rn, node:$Rm))>;
350
351 def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
352                        (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
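// Neon_mla/Neon_mls match the separate add/sub-of-multiply forms, e.g.
// "mla v0.4s, v1.4s, v2.4s" computes v0 = v0 + v1 * v2 per lane and
// "mls v0.4s, v1.4s, v2.4s" computes v0 = v0 - v1 * v2 per lane.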
353
354
355 let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC] in {
356 def MLAvvv_8B:  NeonI_3VSame_Constraint_impl<"mla", ".8b",  VPR64,  v8i8,
357                                              0b0, 0b0, 0b00, 0b10010, Neon_mla>;
358 def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
359                                              0b1, 0b0, 0b00, 0b10010, Neon_mla>;
360 def MLAvvv_4H:  NeonI_3VSame_Constraint_impl<"mla", ".4h",  VPR64,  v4i16,
361                                              0b0, 0b0, 0b01, 0b10010, Neon_mla>;
362 def MLAvvv_8H:  NeonI_3VSame_Constraint_impl<"mla", ".8h",  VPR128, v8i16,
363                                              0b1, 0b0, 0b01, 0b10010, Neon_mla>;
364 def MLAvvv_2S:  NeonI_3VSame_Constraint_impl<"mla", ".2s",  VPR64,  v2i32,
365                                              0b0, 0b0, 0b10, 0b10010, Neon_mla>;
366 def MLAvvv_4S:  NeonI_3VSame_Constraint_impl<"mla", ".4s",  VPR128, v4i32,
367                                              0b1, 0b0, 0b10, 0b10010, Neon_mla>;
368
369 def MLSvvv_8B:  NeonI_3VSame_Constraint_impl<"mls", ".8b",  VPR64,  v8i8,
370                                              0b0, 0b1, 0b00, 0b10010, Neon_mls>;
371 def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
372                                              0b1, 0b1, 0b00, 0b10010, Neon_mls>;
373 def MLSvvv_4H:  NeonI_3VSame_Constraint_impl<"mls", ".4h",  VPR64,  v4i16,
374                                              0b0, 0b1, 0b01, 0b10010, Neon_mls>;
375 def MLSvvv_8H:  NeonI_3VSame_Constraint_impl<"mls", ".8h",  VPR128, v8i16,
376                                              0b1, 0b1, 0b01, 0b10010, Neon_mls>;
377 def MLSvvv_2S:  NeonI_3VSame_Constraint_impl<"mls", ".2s",  VPR64,  v2i32,
378                                              0b0, 0b1, 0b10, 0b10010, Neon_mls>;
379 def MLSvvv_4S:  NeonI_3VSame_Constraint_impl<"mls", ".4s",  VPR128, v4i32,
380                                              0b1, 0b1, 0b10, 0b10010, Neon_mls>;
381 }
382
383 // Vector Multiply-accumulate and Multiply-subtract (Floating Point)
384
385 def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
386                         (fadd node:$Ra, (fmul_su node:$Rn, node:$Rm))>;
387
388 def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
389                         (fsub node:$Ra, (fmul_su node:$Rn, node:$Rm))>;
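// Note: fmul_su is assumed to be the single-use form of fmul, so a multiply is
// only folded into FMLA/FMLS when its result has no other users.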
390
391 let Predicates = [HasNEON, UseFusedMAC],
392     SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC] in {
393 def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s",  VPR64,  v2f32,
394                                              0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
395 def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s",  VPR128, v4f32,
396                                              0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
397 def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d",  VPR128, v2f64,
398                                              0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
399
400 def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s",  VPR64,  v2f32,
401                                               0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
402 def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s",  VPR128, v4f32,
403                                              0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
404 def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d",  VPR128, v2f64,
405                                              0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
406 }
407
408 // An explicit fma intrinsic may always be matched to FMLA/FMLS, regardless of
409 // the compile options that gate the fused fadd/fmul patterns above.
410 def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
411           (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
412 def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
413           (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
414 def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
415           (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
416
417 def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
418           (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
419 def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
420           (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
421 def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
422           (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
423
424 // Vector Divide (Floating-Point)
425
426 let SchedRW = [WriteFPDiv, ReadFPDiv, ReadFPDiv] in {
427 defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv,
428                                      v2f32, v4f32, v2f64, 0>;
429 }
430
431 // Vector Bitwise Operations
432
433 // Vector Bitwise AND
434
435 defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
436
437 // Vector Bitwise Exclusive OR
438
439 defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
440
441 // Vector Bitwise OR
442
443 defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
444
445 // ORR is disassembled as MOV if Vn==Vm.
446
447 // Vector Move - register
448 // Alias for ORR when Vn==Vm.
449 def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
450                     (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn)>;
451 def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
452                     (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn)>;
453
454 // The MOVI instruction takes two immediate operands.  The first is the
455 // immediate encoding, while the second is the cmode.  A cmode of 14, or
456 // 0b1110, produces a MOVI operation, rather than a MVNI, ORR, or BIC.
457 def Neon_AllZero : PatFrag<(ops), (Neon_movi (i32 0), (i32 14))>;
458 def Neon_AllOne : PatFrag<(ops), (Neon_movi (i32 255), (i32 14))>;
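// Neon_AllOne is a MOVI with immediate 255 and cmode 14, i.e. 0xFF replicated
// into every byte, giving an all-ones vector; XOR-ing with it (Neon_not8B and
// Neon_not16B below) implements a bitwise NOT.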
459
460 def Neon_not8B  : PatFrag<(ops node:$in),
461                           (xor node:$in, (bitconvert (v8i8 Neon_AllOne)))>;
462 def Neon_not16B : PatFrag<(ops node:$in),
463                           (xor node:$in, (bitconvert (v16i8 Neon_AllOne)))>;
464
465 def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
466                          (or node:$Rn, (Neon_not8B node:$Rm))>;
467
468 def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
469                           (or node:$Rn, (Neon_not16B node:$Rm))>;
470
471 def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
472                          (and node:$Rn, (Neon_not8B node:$Rm))>;
473
474 def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
475                           (and node:$Rn, (Neon_not16B node:$Rm))>;
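// In other words, Neon_orn computes Rn | ~Rm and Neon_bic computes Rn & ~Rm,
// matching the ORN and BIC instructions defined below.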
476
477
478 // Vector Bitwise OR NOT - register
479
480 defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
481                                    Neon_orn8B, Neon_orn16B, 0>;
482
483 // Vector Bitwise Bit Clear (AND NOT) - register
484
485 defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
486                                    Neon_bic8B, Neon_bic16B, 0>;
487
488 multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
489                                    SDPatternOperator opnode16B,
490                                    Instruction INST8B,
491                                    Instruction INST16B> {
492   def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
493             (INST8B VPR64:$Rn, VPR64:$Rm)>;
494   def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
495             (INST8B VPR64:$Rn, VPR64:$Rm)>;
496   def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
497             (INST8B VPR64:$Rn, VPR64:$Rm)>;
498   def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
499             (INST16B VPR128:$Rn, VPR128:$Rm)>;
500   def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
501             (INST16B VPR128:$Rn, VPR128:$Rm)>;
502   def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
503             (INST16B VPR128:$Rn, VPR128:$Rm)>;
504 }
505
506 // Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
507 defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
508 defm : Neon_bitwise2V_patterns<or,  or,  ORRvvv_8B, ORRvvv_16B>;
509 defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
510 defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
511 defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
512
513 //   Vector Bitwise Select
514 def BSLvvv_8B  : NeonI_3VSame_Constraint_impl<"bsl", ".8b",  VPR64, v8i8,
515                                               0b0, 0b1, 0b01, 0b00011, vselect>;
516
517 def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
518                                               0b1, 0b1, 0b01, 0b00011, vselect>;
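// BSL takes its select mask from the accumulator: Rd = (Rd & Rn) | (~Rd & Rm).
// With an all-ones/all-zeros per-element mask this is equivalent to a
// per-element vselect.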
519
520 multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
521                                    Instruction INST8B,
522                                    Instruction INST16B> {
523   // Disassociate type from instruction definition
524   def : Pat<(v8i8 (opnode (v8i8 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
525             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
526   def : Pat<(v2i32 (opnode (v2i32 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
527             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
528   def : Pat<(v2f32 (opnode (v2i32 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
529             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
530   def : Pat<(v4i16 (opnode (v4i16 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
531             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
532   def : Pat<(v1i64 (opnode (v1i64 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
533             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
534   def : Pat<(v1f64 (opnode (v1i64 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
535             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
536   def : Pat<(v16i8 (opnode (v16i8 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
537             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
538   def : Pat<(v4i32 (opnode (v4i32 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
539             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
540   def : Pat<(v8i16 (opnode (v8i16 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
541             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
542   def : Pat<(v2i64 (opnode (v2i64 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
543             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
544   def : Pat<(v2f64 (opnode (v2i64 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
545             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
546   def : Pat<(v4f32 (opnode (v4i32 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
547             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
548
549   // Allow matching the BSL instruction pattern with a non-constant select mask
550   def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
551                     (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
552           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
553   def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
554                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
555           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
556   def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
557                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
558           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
559   def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
560                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
561           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
562   def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
563                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
564           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
565   def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
566                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
567           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
568   def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
569                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
570           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
571   def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
572                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
573           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
574
575   // Allow matching the llvm.arm.neon.vbsl intrinsic.
576   def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
577                     (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
578             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
579   def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
580                     (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
581             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
582   def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
583                     (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
584             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
585   def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
586                     (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
587             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
588   def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
589                     (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
590             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
591   def : Pat<(v1f64 (int_arm_neon_vbsl (v1f64 VPR64:$src),
592                     (v1f64 VPR64:$Rn), (v1f64 VPR64:$Rm))),
593             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
594   def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
595                     (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
596             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
597   def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
598                     (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
599             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
600   def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
601                     (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
602             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
603   def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
604                     (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
605             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
606   def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
607                     (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
608             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
609   def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
610                     (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
611             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
612 }
613
614 // Additional patterns for bitwise instruction BSL
615 defm : Neon_bitwise3V_patterns<vselect, BSLvvv_8B, BSLvvv_16B>;
616
617 def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
618                            (vselect node:$src, node:$Rn, node:$Rm),
619                            [{ (void)N; return false; }]>;
620
621 // Vector Bitwise Insert if True
622
623 def BITvvv_8B  : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64,   v8i8,
624                    0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
625 def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
626                    0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
627
628 // Vector Bitwise Insert if False
629
630 def BIFvvv_8B  : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64,  v8i8,
631                                 0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
632 def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
633                                 0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
634
635 // Vector Absolute Difference and Accumulate (Signed, Unsigned)
636
637 def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
638                        (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
639 def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
640                        (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
641
642 // Vector Absolute Difference and Accumulate (Unsigned)
643 def UABAvvv_8B :  NeonI_3VSame_Constraint_impl<"uaba", ".8b",  VPR64,  v8i8,
644                     0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
645 def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
646                     0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
647 def UABAvvv_4H :  NeonI_3VSame_Constraint_impl<"uaba", ".4h",  VPR64,  v4i16,
648                     0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
649 def UABAvvv_8H :  NeonI_3VSame_Constraint_impl<"uaba", ".8h",  VPR128, v8i16,
650                     0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
651 def UABAvvv_2S :  NeonI_3VSame_Constraint_impl<"uaba", ".2s",  VPR64,  v2i32,
652                     0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
653 def UABAvvv_4S :  NeonI_3VSame_Constraint_impl<"uaba", ".4s",  VPR128, v4i32,
654                     0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
655
656 // Vector Absolute Difference and Accumulate (Signed)
657 def SABAvvv_8B :  NeonI_3VSame_Constraint_impl<"saba", ".8b",  VPR64,  v8i8,
658                     0b0, 0b0, 0b00, 0b01111, Neon_saba>;
659 def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
660                     0b1, 0b0, 0b00, 0b01111, Neon_saba>;
661 def SABAvvv_4H :  NeonI_3VSame_Constraint_impl<"saba", ".4h",  VPR64,  v4i16,
662                     0b0, 0b0, 0b01, 0b01111, Neon_saba>;
663 def SABAvvv_8H :  NeonI_3VSame_Constraint_impl<"saba", ".8h",  VPR128, v8i16,
664                     0b1, 0b0, 0b01, 0b01111, Neon_saba>;
665 def SABAvvv_2S :  NeonI_3VSame_Constraint_impl<"saba", ".2s",  VPR64,  v2i32,
666                     0b0, 0b0, 0b10, 0b01111, Neon_saba>;
667 def SABAvvv_4S :  NeonI_3VSame_Constraint_impl<"saba", ".4s",  VPR128, v4i32,
668                     0b1, 0b0, 0b10, 0b01111, Neon_saba>;
669
670
671 // Vector Absolute Difference (Signed, Unsigned)
672 defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
673 defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
674
675 // Vector Absolute Difference (Floating Point)
676 defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
677                                     int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
678
679 // Vector Reciprocal Step (Floating Point)
680 defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
681                                        int_arm_neon_vrecps,
682                                        v2f32, v4f32, v2f64, 0>;
683
684 // Vector Reciprocal Square Root Step (Floating Point)
685 defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
686                                         int_arm_neon_vrsqrts,
687                                         v2f32, v4f32, v2f64, 0>;
688
689 // Vector Comparisons
690
691 def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
692                         (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
693 def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
694                          (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
695 def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
696                         (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
697 def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
698                         (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
699 def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
700                         (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
701
702 // NeonI_compare_aliases class: swaps the register operands to implement
703 // comparison aliases, e.g., CMLE is an alias for CMGE with operands reversed.
704 class NeonI_compare_aliases<string asmop, string asmlane,
705                             Instruction inst, RegisterOperand VPRC>
706   : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
707                     ", $Rm" # asmlane,
708                   (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
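// For example, the aliases below let the assembler accept
// "cmlt v0.4s, v1.4s, v2.4s" and encode it as "cmgt v0.4s, v2.4s, v1.4s".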
709
710 // Vector Comparisons (Integer)
711
712 // Vector Compare Mask Equal (Integer)
713 let isCommutable = 1 in {
714 defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
715 }
716
717 // Vector Compare Mask Higher or Same (Unsigned Integer)
718 defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
719
720 // Vector Compare Mask Greater Than or Equal (Integer)
721 defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
722
723 // Vector Compare Mask Higher (Unsigned Integer)
724 defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
725
726 // Vector Compare Mask Greater Than (Integer)
727 defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
728
729 // Vector Compare Mask Bitwise Test (Integer)
730 defm CMTSTvvv:  NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
731
732 // Vector Compare Mask Less or Same (Unsigned Integer)
733 // CMLS is an alias for CMHS with operands reversed.
734 def CMLSvvv_8B  : NeonI_compare_aliases<"cmls", ".8b",  CMHSvvv_8B,  VPR64>;
735 def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
736 def CMLSvvv_4H  : NeonI_compare_aliases<"cmls", ".4h",  CMHSvvv_4H,  VPR64>;
737 def CMLSvvv_8H  : NeonI_compare_aliases<"cmls", ".8h",  CMHSvvv_8H,  VPR128>;
738 def CMLSvvv_2S  : NeonI_compare_aliases<"cmls", ".2s",  CMHSvvv_2S,  VPR64>;
739 def CMLSvvv_4S  : NeonI_compare_aliases<"cmls", ".4s",  CMHSvvv_4S,  VPR128>;
740 def CMLSvvv_2D  : NeonI_compare_aliases<"cmls", ".2d",  CMHSvvv_2D,  VPR128>;
741
742 // Vector Compare Mask Less Than or Equal (Integer)
743 // CMLE is an alias for CMGE with operands reversed.
744 def CMLEvvv_8B  : NeonI_compare_aliases<"cmle", ".8b",  CMGEvvv_8B,  VPR64>;
745 def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
746 def CMLEvvv_4H  : NeonI_compare_aliases<"cmle", ".4h",  CMGEvvv_4H,  VPR64>;
747 def CMLEvvv_8H  : NeonI_compare_aliases<"cmle", ".8h",  CMGEvvv_8H,  VPR128>;
748 def CMLEvvv_2S  : NeonI_compare_aliases<"cmle", ".2s",  CMGEvvv_2S,  VPR64>;
749 def CMLEvvv_4S  : NeonI_compare_aliases<"cmle", ".4s",  CMGEvvv_4S,  VPR128>;
750 def CMLEvvv_2D  : NeonI_compare_aliases<"cmle", ".2d",  CMGEvvv_2D,  VPR128>;
751
752 // Vector Compare Mask Lower (Unsigned Integer)
753 // CMLO is an alias for CMHI with operands reversed.
754 def CMLOvvv_8B  : NeonI_compare_aliases<"cmlo", ".8b",  CMHIvvv_8B,  VPR64>;
755 def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
756 def CMLOvvv_4H  : NeonI_compare_aliases<"cmlo", ".4h",  CMHIvvv_4H,  VPR64>;
757 def CMLOvvv_8H  : NeonI_compare_aliases<"cmlo", ".8h",  CMHIvvv_8H,  VPR128>;
758 def CMLOvvv_2S  : NeonI_compare_aliases<"cmlo", ".2s",  CMHIvvv_2S,  VPR64>;
759 def CMLOvvv_4S  : NeonI_compare_aliases<"cmlo", ".4s",  CMHIvvv_4S,  VPR128>;
760 def CMLOvvv_2D  : NeonI_compare_aliases<"cmlo", ".2d",  CMHIvvv_2D,  VPR128>;
761
762 // Vector Compare Mask Less Than (Integer)
763 // CMLT is an alias for CMGT with operands reversed.
764 def CMLTvvv_8B  : NeonI_compare_aliases<"cmlt", ".8b",  CMGTvvv_8B,  VPR64>;
765 def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
766 def CMLTvvv_4H  : NeonI_compare_aliases<"cmlt", ".4h",  CMGTvvv_4H,  VPR64>;
767 def CMLTvvv_8H  : NeonI_compare_aliases<"cmlt", ".8h",  CMGTvvv_8H,  VPR128>;
768 def CMLTvvv_2S  : NeonI_compare_aliases<"cmlt", ".2s",  CMGTvvv_2S,  VPR64>;
769 def CMLTvvv_4S  : NeonI_compare_aliases<"cmlt", ".4s",  CMGTvvv_4S,  VPR128>;
770 def CMLTvvv_2D  : NeonI_compare_aliases<"cmlt", ".2d",  CMGTvvv_2D,  VPR128>;
771
772
773 def neon_uimm0_asmoperand : AsmOperandClass
774 {
775   let Name = "UImm0";
776   let PredicateMethod = "isUImm<0>";
777   let RenderMethod = "addImmOperands";
778 }
779
780 def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
781   let ParserMatchClass = neon_uimm0_asmoperand;
782   let PrintMethod = "printNeonUImm0Operand";
783
784 }
785
786 multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
787 {
788   def _8B :  NeonI_2VMisc<0b0, u, 0b00, opcode,
789              (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
790              asmop # "\t$Rd.8b, $Rn.8b, $Imm",
791              [(set (v8i8 VPR64:$Rd),
792                 (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
793              NoItinerary>,
794              Sched<[WriteFPALU, ReadFPALU]>;
795
796   def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
797              (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
798              asmop # "\t$Rd.16b, $Rn.16b, $Imm",
799              [(set (v16i8 VPR128:$Rd),
800                 (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
801              NoItinerary>,
802              Sched<[WriteFPALU, ReadFPALU]>;
803
804   def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
805             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
806             asmop # "\t$Rd.4h, $Rn.4h, $Imm",
807             [(set (v4i16 VPR64:$Rd),
808                (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
809             NoItinerary>,
810             Sched<[WriteFPALU, ReadFPALU]>;
811
812   def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
813             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
814             asmop # "\t$Rd.8h, $Rn.8h, $Imm",
815             [(set (v8i16 VPR128:$Rd),
816                (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
817             NoItinerary>,
818             Sched<[WriteFPALU, ReadFPALU]>;
819
820   def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
821             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
822             asmop # "\t$Rd.2s, $Rn.2s, $Imm",
823             [(set (v2i32 VPR64:$Rd),
824                (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
825             NoItinerary>,
826             Sched<[WriteFPALU, ReadFPALU]>;
827
828   def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
829             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
830             asmop # "\t$Rd.4s, $Rn.4s, $Imm",
831             [(set (v4i32 VPR128:$Rd),
832                (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
833             NoItinerary>,
834             Sched<[WriteFPALU, ReadFPALU]>;
835
836   def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
837             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
838             asmop # "\t$Rd.2d, $Rn.2d, $Imm",
839             [(set (v2i64 VPR128:$Rd),
840                (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
841             NoItinerary>,
842             Sched<[WriteFPALU, ReadFPALU]>;
843 }
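// The cmpz variants compare each element against an immediate zero, e.g.
// "cmeq v0.4s, v1.4s, #0" sets a lane of v0 to all-ones when the corresponding
// lane of v1 is zero, and to all-zeros otherwise.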
844
845 // Vector Compare Mask Equal to Zero (Integer)
846 defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
847
848 // Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
849 defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
850
851 // Vector Compare Mask Greater Than Zero (Signed Integer)
852 defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
853
854 // Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
855 defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
856
857 // Vector Compare Mask Less Than Zero (Signed Integer)
858 defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
859
860 // Vector Comparisons (Floating Point)
861
862 // Vector Compare Mask Equal (Floating Point)
863 let isCommutable = 1 in {
864 defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
865                                       v2i32, v4i32, v2i64, 0>;
866 }
867
868 // Vector Compare Mask Greater Than Or Equal (Floating Point)
869 defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
870                                       v2i32, v4i32, v2i64, 0>;
871
872 // Vector Compare Mask Greater Than (Floating Point)
873 defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
874                                       v2i32, v4i32, v2i64, 0>;
875
876 // Vector Compare Mask Less Than Or Equal (Floating Point)
877 // FCMLE is an alias for FCMGE with operands reversed.
878 def FCMLEvvv_2S  : NeonI_compare_aliases<"fcmle", ".2s",  FCMGEvvv_2S,  VPR64>;
879 def FCMLEvvv_4S  : NeonI_compare_aliases<"fcmle", ".4s",  FCMGEvvv_4S,  VPR128>;
880 def FCMLEvvv_2D  : NeonI_compare_aliases<"fcmle", ".2d",  FCMGEvvv_2D,  VPR128>;
881
882 // Vector Compare Mask Less Than (Floating Point)
883 // FCMLT is an alias for FCMGT with operands reversed.
884 def FCMLTvvv_2S  : NeonI_compare_aliases<"fcmlt", ".2s",  FCMGTvvv_2S,  VPR64>;
885 def FCMLTvvv_4S  : NeonI_compare_aliases<"fcmlt", ".4s",  FCMGTvvv_4S,  VPR128>;
886 def FCMLTvvv_2D  : NeonI_compare_aliases<"fcmlt", ".2d",  FCMGTvvv_2D,  VPR128>;
887
888 def fpzero_izero_asmoperand : AsmOperandClass {
889   let Name = "FPZeroIZero";
890   let ParserMethod = "ParseFPImm0AndImm0Operand";
891   let DiagnosticType = "FPZero";
892 }
893
894 def fpzz32 : Operand<f32>,
895              ComplexPattern<f32, 1, "SelectFPZeroOperand", [fpimm]> {
896   let ParserMatchClass = fpzero_izero_asmoperand;
897   let PrintMethod = "printFPZeroOperand";
898   let DecoderMethod = "DecodeFPZeroOperand";
899 }
900
901 multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
902                               string asmop, CondCode CC>
903 {
904   def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
905             (outs VPR64:$Rd), (ins VPR64:$Rn, fpzz32:$FPImm),
906             asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
907             [(set (v2i32 VPR64:$Rd),
908                (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpzz32:$FPImm), CC)))],
909             NoItinerary>,
910             Sched<[WriteFPALU, ReadFPALU]>;
911
912   def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
913             (outs VPR128:$Rd), (ins VPR128:$Rn, fpzz32:$FPImm),
914             asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
915             [(set (v4i32 VPR128:$Rd),
916                (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpzz32:$FPImm), CC)))],
917             NoItinerary>,
918             Sched<[WriteFPALU, ReadFPALU]>;
919
920   def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
921             (outs VPR128:$Rd), (ins VPR128:$Rn, fpzz32:$FPImm),
922             asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
923             [(set (v2i64 VPR128:$Rd),
924                (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpzz32:$FPImm), CC)))],
925             NoItinerary>,
926             Sched<[WriteFPALU, ReadFPALU]>;
927 }
928
929 // Vector Compare Mask Equal to Zero (Floating Point)
930 defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
931
932 // Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
933 defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
934
935 // Vector Compare Mask Greater Than Zero (Floating Point)
936 defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
937
938 // Vector Compare Mask Less Than or Equal To Zero (Floating Point)
939 defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
940
941 // Vector Compare Mask Less Than Zero (Floating Point)
942 defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
943
944 // Vector Absolute Comparisons (Floating Point)
945
946 // Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
947 defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
948                                       int_arm_neon_vacge,
949                                       v2i32, v4i32, v2i64, 0>;
950
951 // Vector Absolute Compare Mask Greater Than (Floating Point)
952 defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
953                                       int_arm_neon_vacgt,
954                                       v2i32, v4i32, v2i64, 0>;
955
956 // Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
957 // FACLE is an alias for FACGE with operands reversed.
958 def FACLEvvv_2S  : NeonI_compare_aliases<"facle", ".2s",  FACGEvvv_2S,  VPR64>;
959 def FACLEvvv_4S  : NeonI_compare_aliases<"facle", ".4s",  FACGEvvv_4S,  VPR128>;
960 def FACLEvvv_2D  : NeonI_compare_aliases<"facle", ".2d",  FACGEvvv_2D,  VPR128>;
961
962 // Vector Absolute Compare Mask Less Than (Floating Point)
963 // FACLT is an alias for FACGT with operands reversed.
964 def FACLTvvv_2S  : NeonI_compare_aliases<"faclt", ".2s",  FACGTvvv_2S,  VPR64>;
965 def FACLTvvv_4S  : NeonI_compare_aliases<"faclt", ".4s",  FACGTvvv_4S,  VPR128>;
966 def FACLTvvv_2D  : NeonI_compare_aliases<"faclt", ".2d",  FACGTvvv_2D,  VPR128>;
967
968 // Vector halving add (Integer Signed, Unsigned)
969 defm SHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
970                                         int_arm_neon_vhadds, 1>;
971 defm UHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
972                                         int_arm_neon_vhaddu, 1>;
973
974 // Vector halving sub (Integer Signed, Unsigned)
975 defm SHSUBvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
976                                         int_arm_neon_vhsubs, 0>;
977 defm UHSUBvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
978                                         int_arm_neon_vhsubu, 0>;
979
980 // Vector rounding halving add (Integer Signed, Unsigned)
981 defm SRHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
982                                          int_arm_neon_vrhadds, 1>;
983 defm URHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
984                                          int_arm_neon_vrhaddu, 1>;
985
986 // Vector Saturating add (Integer Signed, Unsigned)
987 defm SQADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
988                    int_arm_neon_vqadds, 1>;
989 defm UQADDvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
990                    int_arm_neon_vqaddu, 1>;
991
992 // Vector Saturating sub (Integer Signed, Unsigned)
993 defm SQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
994                    int_arm_neon_vqsubs, 1>;
995 defm UQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
996                    int_arm_neon_vqsubu, 1>;
997
998 // Vector Shift Left (Signed and Unsigned Integer)
999 defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
1000                  int_arm_neon_vshifts, 1>;
1001 defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
1002                  int_arm_neon_vshiftu, 1>;
1003
1004 // Vector Saturating Shift Left (Signed and Unsigned Integer)
1005 defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
1006                   int_arm_neon_vqshifts, 1>;
1007 defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
1008                   int_arm_neon_vqshiftu, 1>;
1009
1010 // Vector Rounding Shift Left (Signed and Unsigned Integer)
1011 defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
1012                   int_arm_neon_vrshifts, 1>;
1013 defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
1014                   int_arm_neon_vrshiftu, 1>;
1015
1016 // Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
1017 defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
1018                    int_arm_neon_vqrshifts, 1>;
1019 defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
1020                    int_arm_neon_vqrshiftu, 1>;
1021
1022 // Vector Maximum (Signed and Unsigned Integer)
1023 defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
1024 defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
1025
1026 // Vector Minimum (Signed and Unsigned Integer)
1027 defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
1028 defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
1029
1030 // Vector Maximum (Floating Point)
1031 defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
1032                                      int_arm_neon_vmaxs,
1033                                      v2f32, v4f32, v2f64, 1>;
1034
1035 // Vector Minimum (Floating Point)
1036 defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
1037                                      int_arm_neon_vmins,
1038                                      v2f32, v4f32, v2f64, 1>;
1039
1040 // Vector maxNum (Floating Point) - prefer a number over a quiet NaN
1041 defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
1042                                        int_aarch64_neon_vmaxnm,
1043                                        v2f32, v4f32, v2f64, 1>;
1044
1045 // Vector minNum (Floating Point) - prefer a number over a quiet NaN
1046 defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
1047                                        int_aarch64_neon_vminnm,
1048                                        v2f32, v4f32, v2f64, 1>;
1049
1050 // Vector Maximum Pairwise (Signed and Unsigned Integer)
1051 defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
1052 defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
1053
1054 // Vector Minimum Pairwise (Signed and Unsigned Integer)
1055 defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
1056 defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
1057
1058 // Vector Maximum Pairwise (Floating Point)
1059 defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
1060                                      int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
1061
1062 // Vector Minimum Pairwise (Floating Point)
1063 defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
1064                                      int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
1065
1066 // Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
1067 defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
1068                                        int_aarch64_neon_vpmaxnm,
1069                                        v2f32, v4f32, v2f64, 1>;
1070
1071 // Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
1072 defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
1073                                        int_aarch64_neon_vpminnm,
1074                                        v2f32, v4f32, v2f64, 1>;
1075
1076 // Vector Addition Pairwise (Integer)
1077 defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
1078
1079 // Vector Addition Pairwise (Floating Point)
1080 defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
1081                                        int_arm_neon_vpadd,
1082                                        v2f32, v4f32, v2f64, 1>;
1083
1084 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
1085 // Vector Saturating Doubling Multiply High
1086 defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
1087                     int_arm_neon_vqdmulh, 1>;
1088
1089 // Vector Saturating Rounding Doubling Multiply High
1090 defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
1091                      int_arm_neon_vqrdmulh, 1>;
1092
1093 // Vector Multiply Extended (Floating Point)
1094 defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
1095                                       int_aarch64_neon_vmulx,
1096                                       v2f32, v4f32, v2f64, 1>;
1097 }
1098
1099 // Patterns to match the llvm.aarch64.* intrinsics for
1100 // ADDP, SMINP, UMINP, SMAXP and UMAXP with a v1i32 result
1101 class Neon_VectorPair_v2i32_pattern<SDPatternOperator opnode, Instruction INST>
1102   : Pat<(v1i32 (opnode (v2i32 VPR64:$Rn))),
1103         (EXTRACT_SUBREG
1104              (v2i32 (INST (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rn))),
1105              sub_32)>;
1106
1107 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_sminv, SMINPvvv_2S>;
1108 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_uminv, UMINPvvv_2S>;
1109 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_smaxv, SMAXPvvv_2S>;
1110 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_umaxv, UMAXPvvv_2S>;
1111 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_vaddv, ADDP_2S>;
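// For example (illustrative), (v1i32 (int_aarch64_neon_sminv (v2i32 V))) is
// selected as (EXTRACT_SUBREG (SMINPvvv_2S V, V), sub_32): pairing the vector
// with itself leaves the across-vector result in both lanes, and the scalar is
// then read from the low 32 bits.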
1112
1113 // Vector Immediate Instructions
1114
1115 multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
1116 {
1117   def _asmoperand : AsmOperandClass
1118     {
1119       let Name = "NeonMovImmShift" # PREFIX;
1120       let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
1121       let PredicateMethod = "isNeonMovImmShift" # PREFIX;
1122     }
1123 }
1124
1125 // Definition of vector immediate shift operands
1126
1127 // The selectable use cases extract the shift operation
1128 // information from the OpCmode field encoded in the immediate.
1129 def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
1130   uint64_t OpCmode = N->getZExtValue();
1131   unsigned ShiftImm;
1132   unsigned ShiftOnesIn;
1133   unsigned HasShift =
1134     A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1135   if (!HasShift) return SDValue();
1136   return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
1137 }]>;
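// For example (assuming the usual AdvSIMD modified-immediate encoding), an
// OpCmode of 0b001x decodes to ShiftImm == 8 with ShiftOnesIn == 0, i.e. the
// 32-bit element, LSL #8 form.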
1138
1139 // Vector immediate shift operands that accept LSL and MSL
1140 // shift operators, with a shift amount in the range 0, 8, 16, 24 (LSL),
1141 // 0, 8 (LSLH), or 8, 16 (MSL).
1142 defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
1143 defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
1144 // LSLH restricts the shift amount to 0 or 8, out of 0, 8, 16, 24
1145 defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
1146
1147 multiclass neon_mov_imm_shift_operands<string PREFIX,
1148                                        string HALF, string ISHALF, code pred>
1149 {
1150    def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
1151     {
1152       let PrintMethod =
1153         "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1154       let DecoderMethod =
1155         "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1156       let ParserMatchClass =
1157         !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
1158     }
1159 }
1160
1161 defm neon_mov_imm_LSL  : neon_mov_imm_shift_operands<"LSL", "", "false", [{
1162   unsigned ShiftImm;
1163   unsigned ShiftOnesIn;
1164   unsigned HasShift =
1165     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1166   return (HasShift && !ShiftOnesIn);
1167 }]>;
1168
1169 defm neon_mov_imm_MSL  : neon_mov_imm_shift_operands<"MSL", "", "false", [{
1170   unsigned ShiftImm;
1171   unsigned ShiftOnesIn;
1172   unsigned HasShift =
1173     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1174   return (HasShift && ShiftOnesIn);
1175 }]>;
1176
1177 defm neon_mov_imm_LSLH  : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
1178   unsigned ShiftImm;
1179   unsigned ShiftOnesIn;
1180   unsigned HasShift =
1181     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1182   return (HasShift && !ShiftOnesIn);
1183 }]>;
1184
1185 def neon_uimm1_asmoperand : AsmOperandClass
1186 {
1187   let Name = "UImm1";
1188   let PredicateMethod = "isUImm<1>";
1189   let RenderMethod = "addImmOperands";
1190 }
1191
1192 def neon_uimm2_asmoperand : AsmOperandClass
1193 {
1194   let Name = "UImm2";
1195   let PredicateMethod = "isUImm<2>";
1196   let RenderMethod = "addImmOperands";
1197 }
1198
1199 def neon_uimm8_asmoperand : AsmOperandClass
1200 {
1201   let Name = "UImm8";
1202   let PredicateMethod = "isUImm<8>";
1203   let RenderMethod = "addImmOperands";
1204 }
1205
1206 def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1207   let ParserMatchClass = neon_uimm8_asmoperand;
1208   let PrintMethod = "printUImmHexOperand";
1209 }
1210
1211 def neon_uimm64_mask_asmoperand : AsmOperandClass
1212 {
1213   let Name = "NeonUImm64Mask";
1214   let PredicateMethod = "isNeonUImm64Mask";
1215   let RenderMethod = "addNeonUImm64MaskOperands";
1216 }
1217
1218 // MCOperand for a 64-bit bytemask, in which each byte is either 0x00 or 0xff,
1219 // encoded as an unsigned 8-bit value (one bit per byte)
1220 def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1221   let ParserMatchClass = neon_uimm64_mask_asmoperand;
1222   let PrintMethod = "printNeonUImm64MaskOperand";
1223 }
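// For example, the 64-bit mask 0xff00ff00ff00ff00 would be carried as the
// 8-bit value 0b10101010, one bit per byte of the expanded immediate.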
1224
1225 multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
1226                                    SDPatternOperator opnode>
1227 {
1228     // shift zeros, per word
1229     def _2S  : NeonI_1VModImm<0b0, op,
1230                               (outs VPR64:$Rd),
1231                               (ins neon_uimm8:$Imm,
1232                                 neon_mov_imm_LSL_operand:$Simm),
1233                               !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1234                               [(set (v2i32 VPR64:$Rd),
1235                                  (v2i32 (opnode (timm:$Imm),
1236                                    (neon_mov_imm_LSL_operand:$Simm))))],
1237                               NoItinerary>,
1238                Sched<[WriteFPALU]> {
1239        bits<2> Simm;
1240        let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1241      }
1242
1243     def _4S  : NeonI_1VModImm<0b1, op,
1244                               (outs VPR128:$Rd),
1245                               (ins neon_uimm8:$Imm,
1246                                 neon_mov_imm_LSL_operand:$Simm),
1247                               !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1248                               [(set (v4i32 VPR128:$Rd),
1249                                  (v4i32 (opnode (timm:$Imm),
1250                                    (neon_mov_imm_LSL_operand:$Simm))))],
1251                               NoItinerary>,
1252                Sched<[WriteFPALU]> {
1253       bits<2> Simm;
1254       let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1255     }
1256
1257     // shift zeros, per halfword
1258     def _4H  : NeonI_1VModImm<0b0, op,
1259                               (outs VPR64:$Rd),
1260                               (ins neon_uimm8:$Imm,
1261                                 neon_mov_imm_LSLH_operand:$Simm),
1262                               !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
1263                               [(set (v4i16 VPR64:$Rd),
1264                                  (v4i16 (opnode (timm:$Imm),
1265                                    (neon_mov_imm_LSLH_operand:$Simm))))],
1266                               NoItinerary>,
1267                Sched<[WriteFPALU]> {
1268       bit  Simm;
1269       let cmode = {0b1, 0b0, Simm, 0b0};
1270     }
1271
1272     def _8H  : NeonI_1VModImm<0b1, op,
1273                               (outs VPR128:$Rd),
1274                               (ins neon_uimm8:$Imm,
1275                                 neon_mov_imm_LSLH_operand:$Simm),
1276                               !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
1277                               [(set (v8i16 VPR128:$Rd),
1278                                  (v8i16 (opnode (timm:$Imm),
1279                                    (neon_mov_imm_LSLH_operand:$Simm))))],
1280                               NoItinerary>,
1281                Sched<[WriteFPALU]> {
1282       bit Simm;
1283       let cmode = {0b1, 0b0, Simm, 0b0};
1284      }
1285 }
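// In the definitions above, cmode follows the AdvSIMD modified-immediate
// layout: 0b0ss0 selects a 32-bit element with LSL #(8 * ss), and 0b10s0
// selects a 16-bit element with LSL #(8 * s).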
1286
1287 multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
1288                                                    SDPatternOperator opnode,
1289                                                    SDPatternOperator neonopnode>
1290 {
1291   let Constraints = "$src = $Rd" in {
1292     // shift zeros, per word
1293     def _2S  : NeonI_1VModImm<0b0, op,
1294                  (outs VPR64:$Rd),
1295                  (ins VPR64:$src, neon_uimm8:$Imm,
1296                    neon_mov_imm_LSL_operand:$Simm),
1297                  !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1298                  [(set (v2i32 VPR64:$Rd),
1299                     (v2i32 (opnode (v2i32 VPR64:$src),
1300                       (v2i32 (neonopnode timm:$Imm,
1301                         neon_mov_imm_LSL_operand:$Simm)))))],
1302                  NoItinerary>,
1303                Sched<[WriteFPALU, ReadFPALU]> {
1304       bits<2> Simm;
1305       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1306     }
1307
1308     def _4S  : NeonI_1VModImm<0b1, op,
1309                  (outs VPR128:$Rd),
1310                  (ins VPR128:$src, neon_uimm8:$Imm,
1311                    neon_mov_imm_LSL_operand:$Simm),
1312                  !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1313                  [(set (v4i32 VPR128:$Rd),
1314                     (v4i32 (opnode (v4i32 VPR128:$src),
1315                       (v4i32 (neonopnode timm:$Imm,
1316                         neon_mov_imm_LSL_operand:$Simm)))))],
1317                  NoItinerary>,
1318                Sched<[WriteFPALU, ReadFPALU]> {
1319       bits<2> Simm;
1320       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1321     }
1322
1323     // shift zeros, per halfword
1324     def _4H  : NeonI_1VModImm<0b0, op,
1325                  (outs VPR64:$Rd),
1326                  (ins VPR64:$src, neon_uimm8:$Imm,
1327                    neon_mov_imm_LSLH_operand:$Simm),
1328                  !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
1329                  [(set (v4i16 VPR64:$Rd),
1330                     (v4i16 (opnode (v4i16 VPR64:$src),
1331                        (v4i16 (neonopnode timm:$Imm,
1332                           neon_mov_imm_LSL_operand:$Simm)))))],
1333                  NoItinerary>,
1334                Sched<[WriteFPALU, ReadFPALU]> {
1335       bit  Simm;
1336       let cmode = {0b1, 0b0, Simm, 0b1};
1337     }
1338
1339     def _8H  : NeonI_1VModImm<0b1, op,
1340                  (outs VPR128:$Rd),
1341                  (ins VPR128:$src, neon_uimm8:$Imm,
1342                    neon_mov_imm_LSLH_operand:$Simm),
1343                  !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
1344                  [(set (v8i16 VPR128:$Rd),
1345                     (v8i16 (opnode (v8i16 VPR128:$src),
1346                       (v8i16 (neonopnode timm:$Imm,
1347                         neon_mov_imm_LSL_operand:$Simm)))))],
1348                  NoItinerary>,
1349                Sched<[WriteFPALU, ReadFPALU]> {
1350       bit Simm;
1351       let cmode = {0b1, 0b0, Simm, 0b1};
1352     }
1353   }
1354 }
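// The constrained variants above (used below for the BIC and ORR immediates)
// use the same cmode layout but with the low bit set: 0b0ss1 for 32-bit
// elements and 0b10s1 for 16-bit elements.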
1355
1356 multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
1357                                    SDPatternOperator opnode>
1358 {
1359     // shift ones, per word
1360     def _2S  : NeonI_1VModImm<0b0, op,
1361                              (outs VPR64:$Rd),
1362                              (ins neon_uimm8:$Imm,
1363                                neon_mov_imm_MSL_operand:$Simm),
1364                              !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1365                               [(set (v2i32 VPR64:$Rd),
1366                                  (v2i32 (opnode (timm:$Imm),
1367                                    (neon_mov_imm_MSL_operand:$Simm))))],
1368                              NoItinerary>,
1369                Sched<[WriteFPALU]> {
1370        bit Simm;
1371        let cmode = {0b1, 0b1, 0b0, Simm};
1372      }
1373
1374    def _4S  : NeonI_1VModImm<0b1, op,
1375                               (outs VPR128:$Rd),
1376                               (ins neon_uimm8:$Imm,
1377                                 neon_mov_imm_MSL_operand:$Simm),
1378                               !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1379                               [(set (v4i32 VPR128:$Rd),
1380                                  (v4i32 (opnode (timm:$Imm),
1381                                    (neon_mov_imm_MSL_operand:$Simm))))],
1382                               NoItinerary>,
1383               Sched<[WriteFPALU]> {
1384      bit Simm;
1385      let cmode = {0b1, 0b1, 0b0, Simm};
1386    }
1387 }
1388
1389 // Vector Move Immediate Shifted
1390 let isReMaterializable = 1 in {
1391 defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
1392 }
1393
1394 // Vector Move Inverted Immediate Shifted
1395 let isReMaterializable = 1 in {
1396 defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
1397 }
1398
1399 // Vector Bitwise Bit Clear (AND NOT) - immediate
1400 let isReMaterializable = 1 in {
1401 defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
1402                                                          and, Neon_mvni>;
1403 }
1404
1405 // Vector Bitwise OR - immediate
1406
1407 let isReMaterializable = 1 in {
1408 defm ORRvi_lsl   : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
1409                                                            or, Neon_movi>;
1410 }
1411
1412 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate.
1413 // LowerBUILD_VECTOR favors lowering MOVI over MVNI, so selecting the BIC
1414 // immediate instructions requires additional patterns to transform Neon_movi
1415 // operands into BIC immediate operands.
1416
1417 def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
1418   uint64_t OpCmode = N->getZExtValue();
1419   unsigned ShiftImm;
1420   unsigned ShiftOnesIn;
1421   (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1422   // LSLH restricts the shift amount to 0 or 8, which are encoded as 0 and 1.
1423   // Transform encoded shift amount 0 to 1 and 1 to 0.
1424   return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
1425 }]>;
1426
1427 def neon_mov_imm_LSLH_transform_operand
1428   : ImmLeaf<i32, [{
1429     unsigned ShiftImm;
1430     unsigned ShiftOnesIn;
1431     unsigned HasShift =
1432       A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1433     return (HasShift && !ShiftOnesIn); }],
1434   neon_mov_imm_LSLH_transform_XFORM>;
1435
1436 // Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0xff, LSL 8)
1437 // Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0xff)
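// The two forms are equivalent because an AND keeping 0x00ff in each halfword
// clears exactly the bytes that a BIC of 0xff at the opposite shift clears;
// the LSLH transform operand above flips the encoded shift accordingly.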
1438 def : Pat<(v4i16 (and VPR64:$src,
1439             (v4i16 (Neon_movi 255,
1440               neon_mov_imm_LSLH_transform_operand:$Simm)))),
1441           (BICvi_lsl_4H VPR64:$src, 255,
1442             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1443
1444 // Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0xff, LSL 8)
1445 // Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0xff)
1446 def : Pat<(v8i16 (and VPR128:$src,
1447             (v8i16 (Neon_movi 255,
1448               neon_mov_imm_LSLH_transform_operand:$Simm)))),
1449           (BICvi_lsl_8H VPR128:$src, 255,
1450             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1451
1452 def : Pat<(v8i8 (and VPR64:$src,
1453                   (bitconvert(v4i16 (Neon_movi 255,
1454                     neon_mov_imm_LSLH_transform_operand:$Simm))))),
1455           (BICvi_lsl_4H VPR64:$src, 255,
1456             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1457 def : Pat<(v2i32 (and VPR64:$src,
1458                  (bitconvert(v4i16 (Neon_movi 255,
1459                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
1460           (BICvi_lsl_4H VPR64:$src, 255,
1461             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1462 def : Pat<(v1i64 (and VPR64:$src,
1463                 (bitconvert(v4i16 (Neon_movi 255,
1464                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
1465         (BICvi_lsl_4H VPR64:$src, 255,
1466           neon_mov_imm_LSLH_transform_operand:$Simm)>;
1467
1468 def : Pat<(v16i8 (and VPR128:$src,
1469                  (bitconvert(v8i16 (Neon_movi 255,
1470                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
1471         (BICvi_lsl_8H VPR128:$src, 255,
1472           neon_mov_imm_LSLH_transform_operand:$Simm)>;
1473 def : Pat<(v4i32 (and VPR128:$src,
1474                  (bitconvert(v8i16 (Neon_movi 255,
1475                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
1476         (BICvi_lsl_8H VPR128:$src, 255,
1477           neon_mov_imm_LSLH_transform_operand:$Simm)>;
1478 def : Pat<(v2i64 (and VPR128:$src,
1479                  (bitconvert(v8i16 (Neon_movi 255,
1480                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
1481         (BICvi_lsl_8H VPR128:$src, 255,
1482           neon_mov_imm_LSLH_transform_operand:$Simm)>;
1483
1484 multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
1485                                    SDPatternOperator neonopnode,
1486                                    Instruction INST4H,
1487                                    Instruction INST8H,
1488                                    Instruction INST2S,
1489                                    Instruction INST4S> {
1490   def : Pat<(v8i8 (opnode VPR64:$src,
1491                     (bitconvert(v4i16 (neonopnode timm:$Imm,
1492                       neon_mov_imm_LSLH_operand:$Simm))))),
1493             (INST4H VPR64:$src, neon_uimm8:$Imm,
1494               neon_mov_imm_LSLH_operand:$Simm)>;
1495   def : Pat<(v2i32 (opnode VPR64:$src,
1496                    (bitconvert(v4i16 (neonopnode timm:$Imm,
1497                      neon_mov_imm_LSLH_operand:$Simm))))),
1498             (INST4H VPR64:$src, neon_uimm8:$Imm,
1499               neon_mov_imm_LSLH_operand:$Simm)>;
1500   def : Pat<(v1i64 (opnode VPR64:$src,
1501                   (bitconvert(v4i16 (neonopnode timm:$Imm,
1502                     neon_mov_imm_LSLH_operand:$Simm))))),
1503           (INST4H VPR64:$src, neon_uimm8:$Imm,
1504             neon_mov_imm_LSLH_operand:$Simm)>;
1505
1506   def : Pat<(v16i8 (opnode VPR128:$src,
1507                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1508                      neon_mov_imm_LSLH_operand:$Simm))))),
1509           (INST8H VPR128:$src, neon_uimm8:$Imm,
1510             neon_mov_imm_LSLH_operand:$Simm)>;
1511   def : Pat<(v4i32 (opnode VPR128:$src,
1512                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1513                      neon_mov_imm_LSLH_operand:$Simm))))),
1514           (INST8H VPR128:$src, neon_uimm8:$Imm,
1515             neon_mov_imm_LSLH_operand:$Simm)>;
1516   def : Pat<(v2i64 (opnode VPR128:$src,
1517                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1518                      neon_mov_imm_LSLH_operand:$Simm))))),
1519           (INST8H VPR128:$src, neon_uimm8:$Imm,
1520             neon_mov_imm_LSLH_operand:$Simm)>;
1521
1522   def : Pat<(v8i8 (opnode VPR64:$src,
1523                     (bitconvert(v2i32 (neonopnode timm:$Imm,
1524                       neon_mov_imm_LSLH_operand:$Simm))))),
1525             (INST2S VPR64:$src, neon_uimm8:$Imm,
1526               neon_mov_imm_LSLH_operand:$Simm)>;
1527   def : Pat<(v4i16 (opnode VPR64:$src,
1528                    (bitconvert(v2i32 (neonopnode timm:$Imm,
1529                      neon_mov_imm_LSLH_operand:$Simm))))),
1530             (INST2S VPR64:$src, neon_uimm8:$Imm,
1531               neon_mov_imm_LSLH_operand:$Simm)>;
1532   def : Pat<(v1i64 (opnode VPR64:$src,
1533                   (bitconvert(v2i32 (neonopnode timm:$Imm,
1534                     neon_mov_imm_LSLH_operand:$Simm))))),
1535           (INST2S VPR64:$src, neon_uimm8:$Imm,
1536             neon_mov_imm_LSLH_operand:$Simm)>;
1537
1538   def : Pat<(v16i8 (opnode VPR128:$src,
1539                    (bitconvert(v4i32 (neonopnode timm:$Imm,
1540                      neon_mov_imm_LSLH_operand:$Simm))))),
1541           (INST4S VPR128:$src, neon_uimm8:$Imm,
1542             neon_mov_imm_LSLH_operand:$Simm)>;
1543   def : Pat<(v8i16 (opnode VPR128:$src,
1544                    (bitconvert(v4i32 (neonopnode timm:$Imm,
1545                      neon_mov_imm_LSLH_operand:$Simm))))),
1546           (INST4S VPR128:$src, neon_uimm8:$Imm,
1547             neon_mov_imm_LSLH_operand:$Simm)>;
1548   def : Pat<(v2i64 (opnode VPR128:$src,
1549                    (bitconvert(v4i32 (neonopnode timm:$Imm,
1550                      neon_mov_imm_LSLH_operand:$Simm))))),
1551           (INST4S VPR128:$src, neon_uimm8:$Imm,
1552             neon_mov_imm_LSLH_operand:$Simm)>;
1553 }
1554
1555 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1556 defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H,
1557                                BICvi_lsl_2S, BICvi_lsl_4S>;
1558
1559 // Additional patterns for Vector Bitwise OR - immediate
1560 defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H,
1561                                ORRvi_lsl_2S, ORRvi_lsl_4S>;
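// The bitconvert patterns above let the same immediate instruction be selected
// regardless of the vector type the DAG gave the AND/OR; e.g. an OR of a v2i32
// value with a bitcast v4i16 modified immediate still selects ORRvi_lsl_4H.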
1562
1563
1564 // Vector Move Immediate Masked
1565 let isReMaterializable = 1 in {
1566 defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
1567 }
1568
1569 // Vector Move Inverted Immediate Masked
1570 let isReMaterializable = 1 in {
1571 defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
1572 }
1573
1574 class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
1575                                 Instruction inst, RegisterOperand VPRC>
1576   : NeonInstAlias<!strconcat(asmop, "\t$Rd," # asmlane # ", $Imm"),
1577                         (inst VPRC:$Rd, neon_uimm8:$Imm,  0), 0b0>;
1578
1579 // Aliases for Vector Move Immediate Shifted
1580 def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
1581 def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
1582 def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
1583 def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
1584
1585 // Aliases for Vector Move Inverted Immediate Shifted
1586 def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
1587 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
1588 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
1589 def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
1590
1591 // Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
1592 def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
1593 def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
1594 def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
1595 def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
1596
1597 // Aliases for Vector Bitwise OR - immediate
1598 def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
1599 def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
1600 def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
1601 def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
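// These aliases accept e.g. "movi v0.2s, #42" as shorthand for the LSL #0
// form; the trailing 0b0 marks them as parse-only, so they are not used when
// printing.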
1602
1603 // Vector Move Immediate - per byte
1604 let isReMaterializable = 1 in {
1605 def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
1606                                (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
1607                                "movi\t$Rd.8b, $Imm",
1608                                [(set (v8i8 VPR64:$Rd),
1609                                   (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1610                                 NoItinerary>,
1611                 Sched<[WriteFPALU]> {
1612   let cmode = 0b1110;
1613 }
1614
1615 def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
1616                                 (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
1617                                 "movi\t$Rd.16b, $Imm",
1618                                 [(set (v16i8 VPR128:$Rd),
1619                                    (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1620                                  NoItinerary>,
1621                 Sched<[WriteFPALU]> {
1622   let cmode = 0b1110;
1623 }
1624 }
1625
1626 // Vector Move Immediate - bytemask, per double word
1627 let isReMaterializable = 1 in {
1628 def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
1629                                (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
1630                                "movi\t $Rd.2d, $Imm",
1631                                [(set (v2i64 VPR128:$Rd),
1632                                   (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1633                                NoItinerary>,
1634                 Sched<[WriteFPALU]> {
1635   let cmode = 0b1110;
1636 }
1637 }
1638
1639 // Vector Move Immediate - bytemask, one doubleword
1640
1641 let isReMaterializable = 1 in {
1642 def MOVIdi : NeonI_1VModImm<0b0, 0b1,
1643                            (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
1644                            "movi\t $Rd, $Imm",
1645                            [(set (v1i64 FPR64:$Rd),
1646                              (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1647                            NoItinerary>,
1648              Sched<[WriteFPALU]> {
1649   let cmode = 0b1110;
1650 }
1651 }
1652
1653 // Vector Floating Point Move Immediate
1654
1655 class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
1656                       Operand immOpType, bit q, bit op>
1657   : NeonI_1VModImm<q, op,
1658                    (outs VPRC:$Rd), (ins immOpType:$Imm),
1659                    "fmov\t$Rd" # asmlane # ", $Imm",
1660                    [(set (OpTy VPRC:$Rd),
1661                       (OpTy (Neon_fmovi (timm:$Imm))))],
1662                    NoItinerary>,
1663     Sched<[WriteFPALU]> {
1664      let cmode = 0b1111;
1665    }
1666
1667 let isReMaterializable = 1 in {
1668 def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64,  v2f32, fmov32_operand, 0b0, 0b0>;
1669 def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
1670 def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
1671 }
1672
1673 // Vector Shift (Immediate)
1674
1675 // Shift Right/Left Immediate - The immh:immb fields of these shifts are encoded
1676 // as follows:
1677 //
1678 //    Offset    Encoding
1679 //     8        immh:immb<6:3> = '0001xxx', <imm> is encoded in immh:immb<2:0>
1680 //     16       immh:immb<6:4> = '001xxxx', <imm> is encoded in immh:immb<3:0>
1681 //     32       immh:immb<6:5> = '01xxxxx', <imm> is encoded in immh:immb<4:0>
1682 //     64       immh:immb<6>   = '1xxxxxx', <imm> is encoded in immh:immb<5:0>
1683 //
1684 // The shift right immediate amount, in the range 1 to element bits, is computed
1685 // as 2 * Offset - UInt(immh:immb).  The shift left immediate amount, in the
1686 // range 0 to element bits - 1, is computed as UInt(immh:immb) - Offset.
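// Worked example (illustrative): for 16-bit elements (Offset = 16), a shift
// right by 3 is encoded as UInt(immh:immb) = 2 * 16 - 3 = 0b0011101, while a
// shift left by 3 is encoded as UInt(immh:immb) = 16 + 3 = 0b0010011.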
1687
1688 class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
1689   let Name = "ShrImm" # OFFSET;
1690   let RenderMethod = "addImmOperands";
1691   let DiagnosticType = "ShrImm" # OFFSET;
1692 }
1693
1694 class shr_imm<string OFFSET> : Operand<i32> {
1695   let EncoderMethod = "getShiftRightImm" # OFFSET;
1696   let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
1697   let ParserMatchClass =
1698     !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
1699 }
1700
1701 def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
1702 def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
1703 def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
1704 def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
1705
1706 def shr_imm8 : shr_imm<"8">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 8;}]>;
1707 def shr_imm16 : shr_imm<"16">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 16;}]>;
1708 def shr_imm32 : shr_imm<"32">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 32;}]>;
1709 def shr_imm64 : shr_imm<"64">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 64;}]>;
1710
1711 class shl_imm_asmoperands<string OFFSET> : AsmOperandClass {
1712   let Name = "ShlImm" # OFFSET;
1713   let RenderMethod = "addImmOperands";
1714   let DiagnosticType = "ShlImm" # OFFSET;
1715 }
1716
1717 class shl_imm<string OFFSET> : Operand<i32> {
1718   let EncoderMethod = "getShiftLeftImm" # OFFSET;
1719   let DecoderMethod = "DecodeShiftLeftImm" # OFFSET;
1720   let ParserMatchClass =
1721     !cast<AsmOperandClass>("shl_imm" # OFFSET # "_asmoperand");
1722 }
1723
1724 def shl_imm8_asmoperand : shl_imm_asmoperands<"8">;
1725 def shl_imm16_asmoperand : shl_imm_asmoperands<"16">;
1726 def shl_imm32_asmoperand : shl_imm_asmoperands<"32">;
1727 def shl_imm64_asmoperand : shl_imm_asmoperands<"64">;
1728
1729 def shl_imm8 : shl_imm<"8">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 8;}]>;
1730 def shl_imm16 : shl_imm<"16">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 16;}]>;
1731 def shl_imm32 : shl_imm<"32">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 32;}]>;
1732 def shl_imm64 : shl_imm<"64">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 64;}]>;
1733
1734 class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
1735                RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
1736   : NeonI_2VShiftImm<q, u, opcode,
1737                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1738                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1739                      [(set (Ty VPRC:$Rd),
1740                         (Ty (OpNode (Ty VPRC:$Rn),
1741                           (Ty (Neon_vdup (i32 ImmTy:$Imm))))))],
1742                      NoItinerary>,
1743     Sched<[WriteFPALU, ReadFPALU]>;
1744
1745 multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
1746   // 64-bit vector types.
1747   def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8, shl> {
1748     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1749   }
1750
1751   def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16, shl> {
1752     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1753   }
1754
1755   def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32, shl> {
1756     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1757   }
1758
1759   // 128-bit vector types.
1760   def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8, shl> {
1761     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1762   }
1763
1764   def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16, shl> {
1765     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1766   }
1767
1768   def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32, shl> {
1769     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1770   }
1771
1772   def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64, shl> {
1773     let Inst{22} = 0b1;        // immh:immb = 1xxxxxx
1774   }
1775 }
1776
1777 multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1778   def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1779                      OpNode> {
1780     let Inst{22-19} = 0b0001;
1781   }
1782
1783   def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1784                      OpNode> {
1785     let Inst{22-20} = 0b001;
1786   }
1787
1788   def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1789                      OpNode> {
1790     let Inst{22-21} = 0b01;
1791   }
1792
1793   def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1794                       OpNode> {
1795     let Inst{22-19} = 0b0001;
1796   }
1797
1798   def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1799                      OpNode> {
1800     let Inst{22-20} = 0b001;
1801   }
1802
1803   def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1804                      OpNode> {
1805     let Inst{22-21} = 0b01;
1806   }
1807
1808   def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1809                      OpNode> {
1810     let Inst{22} = 0b1;
1811   }
1812 }
1813
1814 // Shift left
1815
1816 defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
1817
1818 // Additional patterns to match vector shift left by immediate.
1819 // (v1i8/v1i16/v1i32 types)
1820 def : Pat<(v1i8 (shl (v1i8 FPR8:$Rn),
1821                      (v1i8 (Neon_vdup (i32 (shl_imm8:$Imm)))))),
1822           (EXTRACT_SUBREG
1823               (SHLvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
1824                           shl_imm8:$Imm),
1825               sub_8)>;
1826 def : Pat<(v1i16 (shl (v1i16 FPR16:$Rn),
1827                       (v1i16 (Neon_vdup (i32 (shl_imm16:$Imm)))))),
1828           (EXTRACT_SUBREG
1829               (SHLvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
1830                           shl_imm16:$Imm),
1831               sub_16)>;
1832 def : Pat<(v1i32 (shl (v1i32 FPR32:$Rn),
1833                       (v1i32 (Neon_vdup (i32 (shl_imm32:$Imm)))))),
1834           (EXTRACT_SUBREG
1835               (SHLvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
1836                           shl_imm32:$Imm),
1837               sub_32)>;
1838
1839 // Shift right
1840 defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
1841 defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
1842
1843 // Additional patterns to match vector shift right by immediate.
1844 // (v1i8/v1i16/v1i32 types)
1845 def : Pat<(v1i8 (sra (v1i8 FPR8:$Rn),
1846                      (v1i8 (Neon_vdup (i32 (shr_imm8:$Imm)))))),
1847           (EXTRACT_SUBREG
1848               (SSHRvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
1849                           shr_imm8:$Imm),
1850               sub_8)>;
1851 def : Pat<(v1i16 (sra (v1i16 FPR16:$Rn),
1852                       (v1i16 (Neon_vdup (i32 (shr_imm16:$Imm)))))),
1853           (EXTRACT_SUBREG
1854               (SSHRvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
1855                           shr_imm16:$Imm),
1856               sub_16)>;
1857 def : Pat<(v1i32 (sra (v1i32 FPR32:$Rn),
1858                       (v1i32 (Neon_vdup (i32 (shr_imm32:$Imm)))))),
1859           (EXTRACT_SUBREG
1860               (SSHRvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
1861                           shr_imm32:$Imm),
1862               sub_32)>;
1863 def : Pat<(v1i8 (srl (v1i8 FPR8:$Rn),
1864                      (v1i8 (Neon_vdup (i32 (shr_imm8:$Imm)))))),
1865           (EXTRACT_SUBREG
1866               (USHRvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
1867                           shr_imm8:$Imm),
1868               sub_8)>;
1869 def : Pat<(v1i16 (srl (v1i16 FPR16:$Rn),
1870                       (v1i16 (Neon_vdup (i32 (shr_imm16:$Imm)))))),
1871           (EXTRACT_SUBREG
1872               (USHRvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
1873                           shr_imm16:$Imm),
1874               sub_16)>;
1875 def : Pat<(v1i32 (srl (v1i32 FPR32:$Rn),
1876                       (v1i32 (Neon_vdup (i32 (shr_imm32:$Imm)))))),
1877           (EXTRACT_SUBREG
1878               (USHRvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
1879                           shr_imm32:$Imm),
1880               sub_32)>;
1881
1882 def Neon_High16B : PatFrag<(ops node:$in),
1883                            (extract_subvector (v16i8 node:$in), (iPTR 8))>;
1884 def Neon_High8H  : PatFrag<(ops node:$in),
1885                            (extract_subvector (v8i16 node:$in), (iPTR 4))>;
1886 def Neon_High4S  : PatFrag<(ops node:$in),
1887                            (extract_subvector (v4i32 node:$in), (iPTR 2))>;
1888 def Neon_High2D  : PatFrag<(ops node:$in),
1889                            (extract_subvector (v2i64 node:$in), (iPTR 1))>;
1890 def Neon_High4float : PatFrag<(ops node:$in),
1891                                (extract_subvector (v4f32 node:$in), (iPTR 2))>;
1892 def Neon_High2double : PatFrag<(ops node:$in),
1893                                (extract_subvector (v2f64 node:$in), (iPTR 1))>;
1894
1895 def Neon_Low16B : PatFrag<(ops node:$in),
1896                           (v8i8 (extract_subvector (v16i8 node:$in),
1897                                                    (iPTR 0)))>;
1898 def Neon_Low8H : PatFrag<(ops node:$in),
1899                          (v4i16 (extract_subvector (v8i16 node:$in),
1900                                                    (iPTR 0)))>;
1901 def Neon_Low4S : PatFrag<(ops node:$in),
1902                          (v2i32 (extract_subvector (v4i32 node:$in),
1903                                                    (iPTR 0)))>;
1904 def Neon_Low2D : PatFrag<(ops node:$in),
1905                          (v1i64 (extract_subvector (v2i64 node:$in),
1906                                                    (iPTR 0)))>;
1907 def Neon_Low4float : PatFrag<(ops node:$in),
1908                              (v2f32 (extract_subvector (v4f32 node:$in),
1909                                                        (iPTR 0)))>;
1910 def Neon_Low2double : PatFrag<(ops node:$in),
1911                               (v1f64 (extract_subvector (v2f64 node:$in),
1912                                                         (iPTR 0)))>;
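// The Neon_High*/Neon_Low* fragments select the high or low 64-bit half of a
// 128-bit vector; the High forms are used by the second-part ("2") widening
// shifts below, e.g. SSHLL2/USHLL2.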
1913
1914 class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1915                    string SrcT, ValueType DestTy, ValueType SrcTy,
1916                    Operand ImmTy, SDPatternOperator ExtOp>
1917   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1918                      (ins VPR64:$Rn, ImmTy:$Imm),
1919                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1920                      [(set (DestTy VPR128:$Rd),
1921                         (DestTy (shl
1922                           (DestTy (ExtOp (SrcTy VPR64:$Rn))),
1923                             (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
1924                      NoItinerary>,
1925     Sched<[WriteFPALU, ReadFPALU]>;
1926
1927 class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1928                        string SrcT, ValueType DestTy, ValueType SrcTy,
1929                        int StartIndex, Operand ImmTy,
1930                        SDPatternOperator ExtOp, PatFrag getTop>
1931   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1932                      (ins VPR128:$Rn, ImmTy:$Imm),
1933                      asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1934                      [(set (DestTy VPR128:$Rd),
1935                         (DestTy (shl
1936                           (DestTy (ExtOp
1937                             (SrcTy (getTop VPR128:$Rn)))),
1938                               (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
1939                      NoItinerary>,
1940     Sched<[WriteFPALU, ReadFPALU]>;
1941
1942 multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
1943                          SDNode ExtOp> {
1944   // 64-bit vector types.
1945   def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
1946                          shl_imm8, ExtOp> {
1947     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1948   }
1949
1950   def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
1951                          shl_imm16, ExtOp> {
1952     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1953   }
1954
1955   def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
1956                          shl_imm32, ExtOp> {
1957     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1958   }
1959
1960   // 128-bit vector types
1961   def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b", v8i16, v8i8,
1962                               8, shl_imm8, ExtOp, Neon_High16B> {
1963     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1964   }
1965
1966   def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h", v4i32, v4i16,
1967                              4, shl_imm16, ExtOp, Neon_High8H> {
1968     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1969   }
1970
1971   def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s", v2i64, v2i32,
1972                              2, shl_imm32, ExtOp, Neon_High4S> {
1973     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1974   }
1975
1976   // When the shift amount is 0 the shl is folded away, so match the bare extension.
1977   def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
1978             (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
1979
1980   def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
1981             (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
1982
1983   def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
1984             (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
1985
1986   def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
1987             (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
1988
1989   def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
1990             (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
1991
1992   def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
1993             (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
1994 }
1995
1996 // Shift left long
1997 defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
1998 defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
1999
2000 class NeonI_ext_len_alias<string asmop, string lane, string laneOp,
2001                        Instruction inst, RegisterOperand VPRC,
2002                        RegisterOperand VPRCOp>
2003   : NeonInstAlias<asmop # "\t$Rd" # lane #", $Rn" # laneOp,
2004                   (inst VPRC:$Rd, VPRCOp:$Rn, 0), 0b0>;
2005
2006 // Signed integer lengthen (vector) is an alias for SSHLL Vd, Vn, #0
2007 // Signed integer lengthen (vector, second part) is an alias for SSHLL2 Vd, Vn, #0
2008 // FIXME: This is actually the preferred syntax but TableGen can't deal with
2009 // custom printing of aliases.
2010 def SXTLvv_8B  : NeonI_ext_len_alias<"sxtl", ".8h", ".8b",  SSHLLvvi_8B, VPR128, VPR64>;
2011 def SXTLvv_4H  : NeonI_ext_len_alias<"sxtl", ".4s", ".4h",  SSHLLvvi_4H, VPR128, VPR64>;
2012 def SXTLvv_2S  : NeonI_ext_len_alias<"sxtl", ".2d", ".2s",  SSHLLvvi_2S, VPR128, VPR64>;
2013 def SXTL2vv_16B : NeonI_ext_len_alias<"sxtl2", ".8h", ".16b",  SSHLLvvi_16B, VPR128, VPR128>;
2014 def SXTL2vv_8H  : NeonI_ext_len_alias<"sxtl2", ".4s", ".8h",  SSHLLvvi_8H, VPR128, VPR128>;
2015 def SXTL2vv_4S  : NeonI_ext_len_alias<"sxtl2", ".2d", ".4s",  SSHLLvvi_4S, VPR128, VPR128>;
2016
2017 // Unsigned integer lengthen (vector) is an alias for USHLL Vd, Vn, #0
2018 // Unsigned integer lengthen (vector, second part) is an alias for USHLL2 Vd, Vn, #0
2019 // FIXME: This is actually the preferred syntax but TableGen can't deal with
2020 // custom printing of aliases.
2021 def UXTLvv_8B  : NeonI_ext_len_alias<"uxtl", ".8h", ".8b",  USHLLvvi_8B, VPR128, VPR64>;
2022 def UXTLvv_4H  : NeonI_ext_len_alias<"uxtl", ".4s", ".4h",  USHLLvvi_4H, VPR128, VPR64>;
2023 def UXTLvv_2S  : NeonI_ext_len_alias<"uxtl", ".2d", ".2s",  USHLLvvi_2S, VPR128, VPR64>;
2024 def UXTL2vv_16B : NeonI_ext_len_alias<"uxtl2", ".8h", ".16b",  USHLLvvi_16B, VPR128, VPR128>;
2025 def UXTL2vv_8H  : NeonI_ext_len_alias<"uxtl2", ".4s", ".8h",  USHLLvvi_8H, VPR128, VPR128>;
2026 def UXTL2vv_4S  : NeonI_ext_len_alias<"uxtl2", ".2d", ".4s",  USHLLvvi_4S, VPR128, VPR128>;
2027
2028 def : Pat<(v8i16 (anyext (v8i8 VPR64:$Rn))), (USHLLvvi_8B VPR64:$Rn, 0)>;
2029 def : Pat<(v4i32 (anyext (v4i16 VPR64:$Rn))), (USHLLvvi_4H VPR64:$Rn, 0)>;
2030 def : Pat<(v2i64 (anyext (v2i32 VPR64:$Rn))), (USHLLvvi_2S VPR64:$Rn, 0)>;
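// anyext can reuse USHLL with a zero shift amount because the high bits of an
// any-extend are undefined, so zero-extending is always a valid choice.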
2031
2032 // Rounding/Saturating shift
2033 class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
2034                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
2035                   SDPatternOperator OpNode>
2036   : NeonI_2VShiftImm<q, u, opcode,
2037                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
2038                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2039                      [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
2040                         (i32 ImmTy:$Imm))))],
2041                      NoItinerary>,
2042     Sched<[WriteFPALU, ReadFPALU]>;
2043
2044 // shift right (vector by immediate)
2045 multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
2046                            SDPatternOperator OpNode> {
2047   def _8B  : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
2048                          OpNode> {
2049     let Inst{22-19} = 0b0001;
2050   }
2051
2052   def _4H  : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2053                          OpNode> {
2054     let Inst{22-20} = 0b001;
2055   }
2056
2057   def _2S  : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2058                          OpNode> {
2059     let Inst{22-21} = 0b01;
2060   }
2061
2062   def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2063                          OpNode> {
2064     let Inst{22-19} = 0b0001;
2065   }
2066
2067   def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2068                         OpNode> {
2069     let Inst{22-20} = 0b001;
2070   }
2071
2072   def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2073                         OpNode> {
2074     let Inst{22-21} = 0b01;
2075   }
2076
2077   def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2078                         OpNode> {
2079     let Inst{22} = 0b1;
2080   }
2081 }
2082
2083 multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
2084                           SDPatternOperator OpNode> {
2085   // 64-bit vector types.
2086   def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
2087                         OpNode> {
2088     let Inst{22-19} = 0b0001;
2089   }
2090
2091   def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
2092                         OpNode> {
2093     let Inst{22-20} = 0b001;
2094   }
2095
2096   def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
2097                         OpNode> {
2098     let Inst{22-21} = 0b01;
2099   }
2100
2101   // 128-bit vector types.
2102   def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
2103                          OpNode> {
2104     let Inst{22-19} = 0b0001;
2105   }
2106
2107   def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
2108                         OpNode> {
2109     let Inst{22-20} = 0b001;
2110   }
2111
2112   def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
2113                         OpNode> {
2114     let Inst{22-21} = 0b01;
2115   }
2116
2117   def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
2118                         OpNode> {
2119     let Inst{22} = 0b1;
2120   }
2121 }
2122
2123 // Rounding shift right
2124 defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
2125                                 int_aarch64_neon_vsrshr>;
2126 defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
2127                                 int_aarch64_neon_vurshr>;
2128
2129 // Saturating shift left unsigned
2130 defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
2131
2132 // Saturating shift left
2133 defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
2134 defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
2135
2136 class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
2137                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
2138                   SDNode OpNode>
2139   : NeonI_2VShiftImm<q, u, opcode,
2140            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
2141            asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2142            [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
2143               (Ty (OpNode (Ty VPRC:$Rn),
2144                 (Ty (Neon_vdup (i32 ImmTy:$Imm))))))))],
2145            NoItinerary>,
2146     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
2147   let Constraints = "$src = $Rd";
2148 }
2149
2150 // Shift Right accumulate
2151 multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
2152   def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
2153                         OpNode> {
2154     let Inst{22-19} = 0b0001;
2155   }
2156
2157   def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2158                         OpNode> {
2159     let Inst{22-20} = 0b001;
2160   }
2161
2162   def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2163                         OpNode> {
2164     let Inst{22-21} = 0b01;
2165   }
2166
2167   def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2168                          OpNode> {
2169     let Inst{22-19} = 0b0001;
2170   }
2171
2172   def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2173                         OpNode> {
2174     let Inst{22-20} = 0b001;
2175   }
2176
2177   def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2178                         OpNode> {
2179     let Inst{22-21} = 0b01;
2180   }
2181
2182   def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2183                         OpNode> {
2184     let Inst{22} = 0b1;
2185   }
2186 }
2187
2188 // Shift right and accumulate
2189 defm SSRAvvi    : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
2190 defm USRAvvi    : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
2191
2192 // Rounding shift accumulate
2193 class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
2194                     RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
2195                     SDPatternOperator OpNode>
2196   : NeonI_2VShiftImm<q, u, opcode,
2197                      (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
2198                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2199                      [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
2200                         (Ty (OpNode (Ty VPRC:$Rn), (i32 ImmTy:$Imm))))))],
2201                      NoItinerary>,
2202     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
2203   let Constraints = "$src = $Rd";
2204 }
2205
2206 multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
2207                              SDPatternOperator OpNode> {
2208   def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
2209                           OpNode> {
2210     let Inst{22-19} = 0b0001;
2211   }
2212
2213   def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2214                           OpNode> {
2215     let Inst{22-20} = 0b001;
2216   }
2217
2218   def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2219                           OpNode> {
2220     let Inst{22-21} = 0b01;
2221   }
2222
2223   def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2224                            OpNode> {
2225     let Inst{22-19} = 0b0001;
2226   }
2227
2228   def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2229                           OpNode> {
2230     let Inst{22-20} = 0b001;
2231   }
2232
2233   def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2234                           OpNode> {
2235     let Inst{22-21} = 0b01;
2236   }
2237
2238   def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2239                           OpNode> {
2240     let Inst{22} = 0b1;
2241   }
2242 }
2243
2244 // Rounding shift right and accumulate
2245 defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
2246 defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
2247
2248 // Shift insert by immediate
2249 class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
2250                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
2251                   SDPatternOperator OpNode>
2252     : NeonI_2VShiftImm<q, u, opcode,
2253            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
2254            asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2255            [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
2256              (i32 ImmTy:$Imm))))],
2257            NoItinerary>,
2258       Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
2259   let Constraints = "$src = $Rd";
2260 }
2261
2262 // shift left insert (vector by immediate)
2263 multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
2264   def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
2265                         int_aarch64_neon_vsli> {
2266     let Inst{22-19} = 0b0001;
2267   }
2268
2269   def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
2270                         int_aarch64_neon_vsli> {
2271     let Inst{22-20} = 0b001;
2272   }
2273
2274   def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
2275                         int_aarch64_neon_vsli> {
2276     let Inst{22-21} = 0b01;
2277   }
2278
2279   // 128-bit vector types.
2280   def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
2281                          int_aarch64_neon_vsli> {
2282     let Inst{22-19} = 0b0001;
2283   }
2284
2285   def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
2286                         int_aarch64_neon_vsli> {
2287     let Inst{22-20} = 0b001;
2288   }
2289
2290   def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
2291                         int_aarch64_neon_vsli> {
2292     let Inst{22-21} = 0b01;
2293   }
2294
2295   def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
2296                         int_aarch64_neon_vsli> {
2297     let Inst{22} = 0b1;
2298   }
2299 }
2300
2301 // shift right insert (vector by immediate)
2302 multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
2303   // 64-bit vector types.
2304   def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
2305                         int_aarch64_neon_vsri> {
2306     let Inst{22-19} = 0b0001;
2307   }
2308
2309   def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2310                         int_aarch64_neon_vsri> {
2311     let Inst{22-20} = 0b001;
2312   }
2313
2314   def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2315                         int_aarch64_neon_vsri> {
2316     let Inst{22-21} = 0b01;
2317   }
2318
2319   // 128-bit vector types.
2320   def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2321                          int_aarch64_neon_vsri> {
2322     let Inst{22-19} = 0b0001;
2323   }
2324
2325   def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2326                         int_aarch64_neon_vsri> {
2327     let Inst{22-20} = 0b001;
2328   }
2329
2330   def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2331                         int_aarch64_neon_vsri> {
2332     let Inst{22-21} = 0b01;
2333   }
2334
2335   def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2336                         int_aarch64_neon_vsri> {
2337     let Inst{22} = 0b1;
2338   }
2339 }
2340
2341 // Shift left and insert
2342 defm SLIvvi   : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
2343
2344 // Shift right and insert
2345 defm SRIvvi   : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
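// SLI/SRI shift each element of Rn and insert the result into Rd, leaving the
// destination bits outside the shifted-in field unchanged; hence the
// "$src = $Rd" constraint on N2VShiftIns above.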
2346
2347 class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
2348                     string SrcT, Operand ImmTy>
2349   : NeonI_2VShiftImm<q, u, opcode,
2350                      (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
2351                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
2352                      [], NoItinerary>,
2353     Sched<[WriteFPALU, ReadFPALU]>;
2354
2355 class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
2356                        string SrcT, Operand ImmTy>
2357   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
2358                      (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
2359                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
2360                      [], NoItinerary>,
2361     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
2362   let Constraints = "$src = $Rd";
2363 }
2364
2365 // Shift right narrow by immediate
2366 multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
2367   def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
2368     let Inst{22-19} = 0b0001;
2369   }
2370
2371   def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
2372     let Inst{22-20} = 0b001;
2373   }
2374
2375   def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
2376     let Inst{22-21} = 0b01;
2377   }
2378
2379   // Shift Narrow High
2380   def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
2381                               shr_imm8> {
2382     let Inst{22-19} = 0b0001;
2383   }
2384
2385   def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
2386                              shr_imm16> {
2387     let Inst{22-20} = 0b001;
2388   }
2389
2390   def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
2391                              shr_imm32> {
2392     let Inst{22-21} = 0b01;
2393   }
2394 }
2395
2396 // Shift right narrow
2397 defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
2398
2399 // Shift right narrow (prefix Q is saturating, prefix R is rounding)
2400 defm QSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
2401 defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
2402 defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
2403 defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
2404 defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
2405 defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
2406 defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
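// E.g. "shrn v0.8b, v1.8h, #3" shifts each 16-bit element of v1 right by 3 and
// truncates it to 8 bits. "sqshrun" saturates a signed source to an unsigned
// result, and the "2" forms write the high half of the destination register.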
2407
2408 def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
2409                               (v2i64 (concat_vectors (v1i64 node:$Rm),
2410                                                      (v1i64 node:$Rn)))>;
2411 def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
2412                               (v8i16 (concat_vectors (v4i16 node:$Rm),
2413                                                      (v4i16 node:$Rn)))>;
2414 def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
2415                               (v4i32 (concat_vectors (v2i32 node:$Rm),
2416                                                      (v2i32 node:$Rn)))>;
2417 def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
2418                               (v4f32 (concat_vectors (v2f32 node:$Rm),
2419                                                      (v2f32 node:$Rn)))>;
2420 def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
2421                               (v2f64 (concat_vectors (v1f64 node:$Rm),
2422                                                      (v1f64 node:$Rn)))>;
2423
2424 def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2425                              (v8i16 (srl (v8i16 node:$lhs),
2426                                (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
2427 def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2428                              (v4i32 (srl (v4i32 node:$lhs),
2429                                (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
2430 def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2431                              (v2i64 (srl (v2i64 node:$lhs),
2432                                (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
2433 def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2434                              (v8i16 (sra (v8i16 node:$lhs),
2435                                (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
2436 def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2437                              (v4i32 (sra (v4i32 node:$lhs),
2438                                (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
2439 def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2440                              (v2i64 (sra (v2i64 node:$lhs),
2441                                (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
2442
2443 // Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
2444 multiclass Neon_shiftNarrow_patterns<string shr> {
2445   def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
2446               (i32 shr_imm8:$Imm)))),
2447             (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
2448   def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
2449               (i32 shr_imm16:$Imm)))),
2450             (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
2451   def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
2452               (i32 shr_imm32:$Imm)))),
2453             (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
2454
2455   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2456               (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
2457                 VPR128:$Rn, (i32 shr_imm8:$Imm))))))),
2458             (SHRNvvi_16B (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
2459                          VPR128:$Rn, imm:$Imm)>;
2460   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2461               (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
2462                 VPR128:$Rn, (i32 shr_imm16:$Imm))))))),
2463             (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2464                         VPR128:$Rn, imm:$Imm)>;
2465   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2466               (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
2467                 VPR128:$Rn, (i32 shr_imm32:$Imm))))))),
2468             (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2469                         VPR128:$Rn, imm:$Imm)>;
2470 }
2471
2472 multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
2473   def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), shr_imm8:$Imm)),
2474             (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
2475   def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), shr_imm16:$Imm)),
2476             (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
2477   def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), shr_imm32:$Imm)),
2478             (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
2479
2480   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2481                 (v1i64 (bitconvert (v8i8
2482                     (op (v8i16 VPR128:$Rn), shr_imm8:$Imm))))),
2483             (!cast<Instruction>(prefix # "_16B")
2484                 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2485                 VPR128:$Rn, imm:$Imm)>;
2486   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2487                 (v1i64 (bitconvert (v4i16
2488                     (op (v4i32 VPR128:$Rn), shr_imm16:$Imm))))),
2489             (!cast<Instruction>(prefix # "_8H")
2490                 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2491                 VPR128:$Rn, imm:$Imm)>;
2492   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2493                 (v1i64 (bitconvert (v2i32
2494                     (op (v2i64 VPR128:$Rn), shr_imm32:$Imm))))),
2495             (!cast<Instruction>(prefix # "_4S")
2496                   (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2497                   VPR128:$Rn, imm:$Imm)>;
2498 }
2499
2500 defm : Neon_shiftNarrow_patterns<"lshr">;
2501 defm : Neon_shiftNarrow_patterns<"ashr">;
2502
2503 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
2504 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
2505 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
2506 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
2507 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
2508 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
2509 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
2510
2511 // Convert between fixed-point and floating-point
2512 class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
2513                 RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
2514                 Operand ImmTy, SDPatternOperator IntOp>
2515   : NeonI_2VShiftImm<q, u, opcode,
2516                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
2517                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2518                      [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
2519                        (i32 ImmTy:$Imm))))],
2520                      NoItinerary>,
2521     Sched<[WriteFPALU, ReadFPALU]>;
2522
2523 multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
2524                               SDPatternOperator IntOp> {
2525   def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
2526                       shr_imm32, IntOp> {
2527     let Inst{22-21} = 0b01;
2528   }
2529
2530   def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
2531                       shr_imm32, IntOp> {
2532     let Inst{22-21} = 0b01;
2533   }
2534
2535   def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
2536                       shr_imm64, IntOp> {
2537     let Inst{22} = 0b1;
2538   }
2539 }
2540
2541 multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
2542                               SDPatternOperator IntOp> {
2543   def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
2544                       shr_imm32, IntOp> {
2545     let Inst{22-21} = 0b01;
2546   }
2547
2548   def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
2549                       shr_imm32, IntOp> {
2550     let Inst{22-21} = 0b01;
2551   }
2552
2553   def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
2554                       shr_imm64, IntOp> {
2555     let Inst{22} = 0b1;
2556   }
2557 }
2558
2559 // Convert fixed-point to floating-point
2560 defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
2561                                    int_arm_neon_vcvtfxs2fp>;
2562 defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
2563                                    int_arm_neon_vcvtfxu2fp>;
2564
2565 // Convert floating-point to fixed-point
2566 defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
2567                                    int_arm_neon_vcvtfp2fxs>;
2568 defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
2569                                    int_arm_neon_vcvtfp2fxu>;
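// E.g. "scvtf v0.4s, v1.4s, #16" treats each 32-bit element of v1 as a signed
// fixed-point value with 16 fractional bits and converts it to float, while
// "fcvtzs v0.4s, v1.4s, #16" performs the inverse, rounding toward zero.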
2570
2571 multiclass Neon_sshll2_0<SDNode ext>
2572 {
2573   def _v8i8  : PatFrag<(ops node:$Rn),
2574                        (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
2575   def _v4i16 : PatFrag<(ops node:$Rn),
2576                        (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
2577   def _v2i32 : PatFrag<(ops node:$Rn),
2578                        (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
2579 }
2580
2581 defm NI_sext_high : Neon_sshll2_0<sext>;
2582 defm NI_zext_high : Neon_sshll2_0<zext>;
2583
2584
2585 //===----------------------------------------------------------------------===//
2586 // Multiclasses for NeonI_Across
2587 //===----------------------------------------------------------------------===//
2588
2589 // Variant 1
2590
2591 multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
2592                             string asmop, SDPatternOperator opnode>
2593 {
2594     def _1h8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
2595                 (outs FPR16:$Rd), (ins VPR64:$Rn),
2596                 asmop # "\t$Rd, $Rn.8b",
2597                 [(set (v1i16 FPR16:$Rd),
2598                     (v1i16 (opnode (v8i8 VPR64:$Rn))))],
2599                 NoItinerary>,
2600                 Sched<[WriteFPALU, ReadFPALU]>;
2601
2602     def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2603                 (outs FPR16:$Rd), (ins VPR128:$Rn),
2604                 asmop # "\t$Rd, $Rn.16b",
2605                 [(set (v1i16 FPR16:$Rd),
2606                     (v1i16 (opnode (v16i8 VPR128:$Rn))))],
2607                 NoItinerary>,
2608                 Sched<[WriteFPALU, ReadFPALU]>;
2609
2610     def _1s4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
2611                 (outs FPR32:$Rd), (ins VPR64:$Rn),
2612                 asmop # "\t$Rd, $Rn.4h",
2613                 [(set (v1i32 FPR32:$Rd),
2614                     (v1i32 (opnode (v4i16 VPR64:$Rn))))],
2615                 NoItinerary>,
2616                 Sched<[WriteFPALU, ReadFPALU]>;
2617
2618     def _1s8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
2619                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2620                 asmop # "\t$Rd, $Rn.8h",
2621                 [(set (v1i32 FPR32:$Rd),
2622                     (v1i32 (opnode (v8i16 VPR128:$Rn))))],
2623                 NoItinerary>,
2624                 Sched<[WriteFPALU, ReadFPALU]>;
2625
2626     // _1d2s doesn't exist!
2627
2628     def _1d4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
2629                 (outs FPR64:$Rd), (ins VPR128:$Rn),
2630                 asmop # "\t$Rd, $Rn.4s",
2631                 [(set (v1i64 FPR64:$Rd),
2632                     (v1i64 (opnode (v4i32 VPR128:$Rn))))],
2633                 NoItinerary>,
2634                 Sched<[WriteFPALU, ReadFPALU]>;
2635 }
2636
2637 defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
2638 defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
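// E.g. "saddlv h0, v1.8b" sums the eight signed byte lanes of v1 into a single
// 16-bit result. The result element is twice the source width, which is why
// the 4s form above writes a D register.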
2639
2640 // Variant 2
2641
2642 multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
2643                             string asmop, SDPatternOperator opnode>
2644 {
2645     def _1b8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
2646                 (outs FPR8:$Rd), (ins VPR64:$Rn),
2647                 asmop # "\t$Rd, $Rn.8b",
2648                 [(set (v1i8 FPR8:$Rd),
2649                     (v1i8 (opnode (v8i8 VPR64:$Rn))))],
2650                 NoItinerary>,
2651                 Sched<[WriteFPALU, ReadFPALU]>;
2652
2653     def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2654                 (outs FPR8:$Rd), (ins VPR128:$Rn),
2655                 asmop # "\t$Rd, $Rn.16b",
2656                 [(set (v1i8 FPR8:$Rd),
2657                     (v1i8 (opnode (v16i8 VPR128:$Rn))))],
2658                 NoItinerary>,
2659                 Sched<[WriteFPALU, ReadFPALU]>;
2660
2661     def _1h4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
2662                 (outs FPR16:$Rd), (ins VPR64:$Rn),
2663                 asmop # "\t$Rd, $Rn.4h",
2664                 [(set (v1i16 FPR16:$Rd),
2665                     (v1i16 (opnode (v4i16 VPR64:$Rn))))],
2666                 NoItinerary>,
2667                 Sched<[WriteFPALU, ReadFPALU]>;
2668
2669     def _1h8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
2670                 (outs FPR16:$Rd), (ins VPR128:$Rn),
2671                 asmop # "\t$Rd, $Rn.8h",
2672                 [(set (v1i16 FPR16:$Rd),
2673                     (v1i16 (opnode (v8i16 VPR128:$Rn))))],
2674                 NoItinerary>,
2675                 Sched<[WriteFPALU, ReadFPALU]>;
2676
2677     // _1s2s doesn't exist!
2678
2679     def _1s4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
2680                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2681                 asmop # "\t$Rd, $Rn.4s",
2682                 [(set (v1i32 FPR32:$Rd),
2683                     (v1i32 (opnode (v4i32 VPR128:$Rn))))],
2684                 NoItinerary>,
2685                 Sched<[WriteFPALU, ReadFPALU]>;
2686 }
2687
2688 defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
2689 defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
2690
2691 defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
2692 defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
2693
2694 defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
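// E.g. "umaxv b0, v1.16b" writes the largest unsigned byte lane of v1 to b0.
// Unlike saddlv/uaddlv, these reductions keep the source element width.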
2695
2696 // Variant 3
2697
2698 multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
2699                             string asmop, SDPatternOperator opnode> {
2700     def _1s4s:  NeonI_2VAcross<0b1, u, size, opcode,
2701                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2702                 asmop # "\t$Rd, $Rn.4s",
2703                 [(set (f32 FPR32:$Rd),
2704                     (f32 (opnode (v4f32 VPR128:$Rn))))],
2705                 NoItinerary>,
2706                 Sched<[WriteFPALU, ReadFPALU]>;
2707 }
2708
2709 defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
2710                                 int_aarch64_neon_vmaxnmv>;
2711 defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
2712                                 int_aarch64_neon_vminnmv>;
2713
2714 defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
2715                               int_aarch64_neon_vmaxv>;
2716 defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
2717                               int_aarch64_neon_vminv>;
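// Only the 4s arrangement exists for these. fmaxnmv/fminnmv follow the
// maxNum/minNum rules (a quiet NaN operand is ignored in favor of a numeric
// one), while fmaxv/fminv propagate NaNs like fmax/fmin.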
2718
2719 // The following are for the instruction class (Perm)
2720
2721 class NeonI_Permute<bit q, bits<2> size, bits<3> opcode,
2722                     string asmop, RegisterOperand OpVPR, string OpS,
2723                     SDPatternOperator opnode, ValueType Ty>
2724   : NeonI_Perm<q, size, opcode,
2725                (outs OpVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2726                asmop # "\t$Rd." # OpS # ", $Rn." # OpS # ", $Rm." # OpS,
2727                [(set (Ty OpVPR:$Rd),
2728                   (Ty (opnode (Ty OpVPR:$Rn), (Ty OpVPR:$Rm))))],
2729                NoItinerary>,
2730     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2731
2732 multiclass NeonI_Perm_pat<bits<3> opcode, string asmop,
2733                           SDPatternOperator opnode> {
2734   def _8b  : NeonI_Permute<0b0, 0b00, opcode, asmop,
2735                            VPR64, "8b", opnode, v8i8>;
2736   def _16b : NeonI_Permute<0b1, 0b00, opcode, asmop,
2737                            VPR128, "16b",opnode, v16i8>;
2738   def _4h  : NeonI_Permute<0b0, 0b01, opcode, asmop,
2739                            VPR64, "4h", opnode, v4i16>;
2740   def _8h  : NeonI_Permute<0b1, 0b01, opcode, asmop,
2741                            VPR128, "8h", opnode, v8i16>;
2742   def _2s  : NeonI_Permute<0b0, 0b10, opcode, asmop,
2743                            VPR64, "2s", opnode, v2i32>;
2744   def _4s  : NeonI_Permute<0b1, 0b10, opcode, asmop,
2745                            VPR128, "4s", opnode, v4i32>;
2746   def _2d  : NeonI_Permute<0b1, 0b11, opcode, asmop,
2747                            VPR128, "2d", opnode, v2i64>;
2748 }
2749
2750 defm UZP1vvv : NeonI_Perm_pat<0b001, "uzp1", Neon_uzp1>;
2751 defm TRN1vvv : NeonI_Perm_pat<0b010, "trn1", Neon_trn1>;
2752 defm ZIP1vvv : NeonI_Perm_pat<0b011, "zip1", Neon_zip1>;
2753 defm UZP2vvv : NeonI_Perm_pat<0b101, "uzp2", Neon_uzp2>;
2754 defm TRN2vvv : NeonI_Perm_pat<0b110, "trn2", Neon_trn2>;
2755 defm ZIP2vvv : NeonI_Perm_pat<0b111, "zip2", Neon_zip2>;
2756
2757 multiclass NeonI_Perm_float_pat<string INS, SDPatternOperator opnode> {
2758   def : Pat<(v2f32 (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
2759             (!cast<Instruction>(INS # "_2s") VPR64:$Rn, VPR64:$Rm)>;
2760
2761   def : Pat<(v4f32 (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
2762             (!cast<Instruction>(INS # "_4s") VPR128:$Rn, VPR128:$Rm)>;
2763
2764   def : Pat<(v2f64 (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
2765             (!cast<Instruction>(INS # "_2d") VPR128:$Rn, VPR128:$Rm)>;
2766 }
2767
2768 defm : NeonI_Perm_float_pat<"UZP1vvv", Neon_uzp1>;
2769 defm : NeonI_Perm_float_pat<"UZP2vvv", Neon_uzp2>;
2770 defm : NeonI_Perm_float_pat<"ZIP1vvv", Neon_zip1>;
2771 defm : NeonI_Perm_float_pat<"ZIP2vvv", Neon_zip2>;
2772 defm : NeonI_Perm_float_pat<"TRN1vvv", Neon_trn1>;
2773 defm : NeonI_Perm_float_pat<"TRN2vvv", Neon_trn2>;
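// E.g. with v1 = {A,B,C,D} and v2 = {E,F,G,H} (4s, lane 0 listed first):
//   uzp1 v0.4s, v1.4s, v2.4s -> {A,C,E,G}    uzp2 -> {B,D,F,H}
//   zip1 v0.4s, v1.4s, v2.4s -> {A,E,B,F}    zip2 -> {C,G,D,H}
//   trn1 v0.4s, v1.4s, v2.4s -> {A,E,C,G}    trn2 -> {B,F,D,H}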
2774
2775 // The following are for the instruction class (3V Diff)
2776
2777 // normal long/long2 pattern
2778 class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
2779                  string asmop, string ResS, string OpS,
2780                  SDPatternOperator opnode, SDPatternOperator ext,
2781                  RegisterOperand OpVPR,
2782                  ValueType ResTy, ValueType OpTy>
2783   : NeonI_3VDiff<q, u, size, opcode,
2784                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2785                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2786                  [(set (ResTy VPR128:$Rd),
2787                     (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
2788                                    (ResTy (ext (OpTy OpVPR:$Rm))))))],
2789                  NoItinerary>,
2790     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2791
2792 multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
2793                         string asmop, SDPatternOperator opnode,
2794                         bit Commutable = 0> {
2795   let isCommutable = Commutable in {
2796     def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2797                            opnode, sext, VPR64, v8i16, v8i8>;
2798     def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2799                            opnode, sext, VPR64, v4i32, v4i16>;
2800     def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2801                            opnode, sext, VPR64, v2i64, v2i32>;
2802   }
2803 }
2804
2805 multiclass NeonI_3VDL2_s<bit u, bits<4> opcode, string asmop,
2806                          SDPatternOperator opnode, bit Commutable = 0> {
2807   let isCommutable = Commutable in {
2808     def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2809                             opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2810     def _4s8h  : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2811                             opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2812     def _2d4s  : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2813                             opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2814   }
2815 }
2816
2817 multiclass NeonI_3VDL_u<bit u, bits<4> opcode, string asmop,
2818                         SDPatternOperator opnode, bit Commutable = 0> {
2819   let isCommutable = Commutable in {
2820     def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2821                            opnode, zext, VPR64, v8i16, v8i8>;
2822     def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2823                            opnode, zext, VPR64, v4i32, v4i16>;
2824     def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2825                            opnode, zext, VPR64, v2i64, v2i32>;
2826   }
2827 }
2828
2829 multiclass NeonI_3VDL2_u<bit u, bits<4> opcode, string asmop,
2830                          SDPatternOperator opnode, bit Commutable = 0> {
2831   let isCommutable = Commutable in {
2832     def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2833                             opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2834     def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2835                            opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2836     def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2837                            opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2838   }
2839 }
2840
2841 defm SADDLvvv :  NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
2842 defm UADDLvvv :  NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
2843
2844 defm SADDL2vvv :  NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
2845 defm UADDL2vvv :  NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
2846
2847 defm SSUBLvvv :  NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
2848 defm USUBLvvv :  NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
2849
2850 defm SSUBL2vvv :  NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
2851 defm USUBL2vvv :  NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
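// E.g. "saddl v0.8h, v1.8b, v2.8b" sign-extends each byte of v1 and v2 to 16
// bits before adding; saddl2/uaddl2 do the same with the high halves of the
// 128-bit source registers.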
2852
2853 // normal wide/wide2 pattern
2854 class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
2855                  string asmop, string ResS, string OpS,
2856                  SDPatternOperator opnode, SDPatternOperator ext,
2857                  RegisterOperand OpVPR,
2858                  ValueType ResTy, ValueType OpTy>
2859   : NeonI_3VDiff<q, u, size, opcode,
2860                  (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
2861                  asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
2862                  [(set (ResTy VPR128:$Rd),
2863                     (ResTy (opnode (ResTy VPR128:$Rn),
2864                                    (ResTy (ext (OpTy OpVPR:$Rm))))))],
2865                  NoItinerary>,
2866     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2867
2868 multiclass NeonI_3VDW_s<bit u, bits<4> opcode, string asmop,
2869                         SDPatternOperator opnode> {
2870   def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2871                          opnode, sext, VPR64, v8i16, v8i8>;
2872   def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2873                          opnode, sext, VPR64, v4i32, v4i16>;
2874   def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2875                          opnode, sext, VPR64, v2i64, v2i32>;
2876 }
2877
2878 defm SADDWvvv :  NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
2879 defm SSUBWvvv :  NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
2880
2881 multiclass NeonI_3VDW2_s<bit u, bits<4> opcode, string asmop,
2882                          SDPatternOperator opnode> {
2883   def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2884                           opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2885   def _4s8h  : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2886                           opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2887   def _2d4s  : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2888                           opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2889 }
2890
2891 defm SADDW2vvv :  NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
2892 defm SSUBW2vvv :  NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
2893
2894 multiclass NeonI_3VDW_u<bit u, bits<4> opcode, string asmop,
2895                         SDPatternOperator opnode> {
2896   def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2897                          opnode, zext, VPR64, v8i16, v8i8>;
2898   def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2899                          opnode, zext, VPR64, v4i32, v4i16>;
2900   def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2901                          opnode, zext, VPR64, v2i64, v2i32>;
2902 }
2903
2904 defm UADDWvvv :  NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
2905 defm USUBWvvv :  NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
2906
2907 multiclass NeonI_3VDW2_u<bit u, bits<4> opcode, string asmop,
2908                          SDPatternOperator opnode> {
2909   def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2910                           opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2911   def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2912                          opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2913   def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2914                          opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2915 }
2916
2917 defm UADDW2vvv :  NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
2918 defm USUBW2vvv :  NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
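// E.g. "uaddw v0.8h, v1.8h, v2.8b" zero-extends each byte of v2 to 16 bits and
// adds it to the corresponding 16-bit element of v1.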
2919
2920 // Get the high half of each vector element.
2921 multiclass NeonI_get_high {
2922   def _8h : PatFrag<(ops node:$Rn),
2923                     (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
2924                                              (v8i16 (Neon_vdup (i32 8)))))))>;
2925   def _4s : PatFrag<(ops node:$Rn),
2926                     (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
2927                                               (v4i32 (Neon_vdup (i32 16)))))))>;
2928   def _2d : PatFrag<(ops node:$Rn),
2929                     (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
2930                                               (v2i64 (Neon_vdup (i32 32)))))))>;
2931 }
2932
2933 defm NI_get_hi : NeonI_get_high;
2934
2935 // pattern for addhn/subhn with 2 operands
2936 class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2937                            string asmop, string ResS, string OpS,
2938                            SDPatternOperator opnode, SDPatternOperator get_hi,
2939                            ValueType ResTy, ValueType OpTy>
2940   : NeonI_3VDiff<q, u, size, opcode,
2941                  (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2942                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2943                  [(set (ResTy VPR64:$Rd),
2944                     (ResTy (get_hi
2945                       (OpTy (opnode (OpTy VPR128:$Rn),
2946                                     (OpTy VPR128:$Rm))))))],
2947                  NoItinerary>,
2948     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2949
2950 multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode, string asmop,
2951                                 SDPatternOperator opnode, bit Commutable = 0> {
2952   let isCommutable = Commutable in {
2953     def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2954                                      opnode, NI_get_hi_8h, v8i8, v8i16>;
2955     def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2956                                      opnode, NI_get_hi_4s, v4i16, v4i32>;
2957     def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2958                                      opnode, NI_get_hi_2d, v2i32, v2i64>;
2959   }
2960 }
2961
2962 defm ADDHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
2963 defm SUBHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
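// E.g. "addhn v0.8b, v1.8h, v2.8h" adds the 16-bit elements of v1 and v2 and
// keeps only the high 8 bits of each sum; raddhn/rsubhn (below) round instead
// of truncating.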
2964
2965 // pattern for operation with 2 operands
2966 class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2967                     string asmop, string ResS, string OpS,
2968                     SDPatternOperator opnode,
2969                     RegisterOperand ResVPR, RegisterOperand OpVPR,
2970                     ValueType ResTy, ValueType OpTy>
2971   : NeonI_3VDiff<q, u, size, opcode,
2972                  (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2973                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2974                  [(set (ResTy ResVPR:$Rd),
2975                     (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
2976                  NoItinerary>,
2977     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2978
2979 // normal narrow pattern
2980 multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode, string asmop,
2981                           SDPatternOperator opnode, bit Commutable = 0> {
2982   let isCommutable = Commutable in {
2983     def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2984                               opnode, VPR64, VPR128, v8i8, v8i16>;
2985     def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2986                               opnode, VPR64, VPR128, v4i16, v4i32>;
2987     def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2988                               opnode, VPR64, VPR128, v2i32, v2i64>;
2989   }
2990 }
2991
2992 defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
2993 defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
2994
2995 // pattern for ACLE intrinsics with 3 operands
2996 class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2997                      string asmop, string ResS, string OpS>
2998   : NeonI_3VDiff<q, u, size, opcode,
2999                  (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
3000                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3001                  [], NoItinerary>,
3002     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
3003   let Constraints = "$src = $Rd";
3004   let neverHasSideEffects = 1;
3005 }
3006
3007 multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode, string asmop> {
3008   def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
3009   def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
3010   def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
3011 }
3012
3013 defm ADDHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
3014 defm SUBHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
3015
3016 defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
3017 defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
3018
3019 // Patterns have to be separate because there's a SUBREG_TO_REG in the output
3020 // part.
3021 class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
3022                         SDPatternOperator coreop>
3023   : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
3024                       (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
3025                                                         (SrcTy VPR128:$Rm)))))),
3026         (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
3027               VPR128:$Rn, VPR128:$Rm)>;
3028
3029 // addhn2 patterns
3030 def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8,  v8i16,
3031           BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
3032 def : NarrowHighHalfPat<ADDHN2vvv_8h4s,  v4i16, v4i32,
3033           BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
3034 def : NarrowHighHalfPat<ADDHN2vvv_4s2d,  v2i32, v2i64,
3035           BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
3036
3037 // subhn2 patterns
3038 def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8,  v8i16,
3039           BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
3040 def : NarrowHighHalfPat<SUBHN2vvv_8h4s,  v4i16, v4i32,
3041           BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
3042 def : NarrowHighHalfPat<SUBHN2vvv_4s2d,  v2i32, v2i64,
3043           BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
3044
3045 // raddhn2 patterns
3046 def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vraddhn>;
3047 def : NarrowHighHalfPat<RADDHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vraddhn>;
3048 def : NarrowHighHalfPat<RADDHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vraddhn>;
3049
3050 // rsubhn2 patterns
3051 def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vrsubhn>;
3052 def : NarrowHighHalfPat<RSUBHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vrsubhn>;
3053 def : NarrowHighHalfPat<RSUBHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vrsubhn>;
3054
3055 // patterns that need to extend the result
3056 class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
3057                      string asmop, string ResS, string OpS,
3058                      SDPatternOperator opnode,
3059                      RegisterOperand OpVPR,
3060                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
3061   : NeonI_3VDiff<q, u, size, opcode,
3062                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
3063                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3064                  [(set (ResTy VPR128:$Rd),
3065                     (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
3066                                                 (OpTy OpVPR:$Rm))))))],
3067                  NoItinerary>,
3068     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
3069
3070 multiclass NeonI_3VDL_zext<bit u, bits<4> opcode, string asmop,
3071                            SDPatternOperator opnode, bit Commutable = 0> {
3072   let isCommutable = Commutable in {
3073     def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3074                                opnode, VPR64, v8i16, v8i8, v8i8>;
3075     def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3076                                opnode, VPR64, v4i32, v4i16, v4i16>;
3077     def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3078                                opnode, VPR64, v2i64, v2i32, v2i32>;
3079   }
3080 }
3081
3082 defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
3083 defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
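// E.g. "uabdl v0.8h, v1.8b, v2.8b" computes |v1[i] - v2[i]| for each byte lane
// and zero-extends the result to 16 bits.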
3084
3085 multiclass NeonI_Op_High<SDPatternOperator op> {
3086   def _16B : PatFrag<(ops node:$Rn, node:$Rm),
3087                      (op (v8i8 (Neon_High16B node:$Rn)),
3088                          (v8i8 (Neon_High16B node:$Rm)))>;
3089   def _8H  : PatFrag<(ops node:$Rn, node:$Rm),
3090                      (op (v4i16 (Neon_High8H node:$Rn)),
3091                          (v4i16 (Neon_High8H node:$Rm)))>;
3092   def _4S  : PatFrag<(ops node:$Rn, node:$Rm),
3093                      (op (v2i32 (Neon_High4S node:$Rn)),
3094                          (v2i32 (Neon_High4S node:$Rm)))>;
3095 }
3096
3097 defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
3098 defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
3099 defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
3100 defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
3101 defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
3102 defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
3103
3104 multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode, string asmop, string opnode,
3105                             bit Commutable = 0> {
3106   let isCommutable = Commutable in {
3107     def _8h8b  : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3108                                 !cast<PatFrag>(opnode # "_16B"),
3109                                 VPR128, v8i16, v16i8, v8i8>;
3110     def _4s4h  : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3111                                 !cast<PatFrag>(opnode # "_8H"),
3112                                 VPR128, v4i32, v8i16, v4i16>;
3113     def _2d2s  : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3114                                 !cast<PatFrag>(opnode # "_4S"),
3115                                 VPR128, v2i64, v4i32, v2i32>;
3116   }
3117 }
3118
3119 defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
3120 defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
3121
3122 // For patterns that need two operators chained together.
3123 class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
3124                      string asmop, string ResS, string OpS,
3125                      SDPatternOperator opnode, SDPatternOperator subop,
3126                      RegisterOperand OpVPR,
3127                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
3128   : NeonI_3VDiff<q, u, size, opcode,
3129                  (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
3130                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3131                  [(set (ResTy VPR128:$Rd),
3132                     (ResTy (opnode
3133                       (ResTy VPR128:$src),
3134                       (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
3135                                                  (OpTy OpVPR:$Rm))))))))],
3136                  NoItinerary>,
3137     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
3138   let Constraints = "$src = $Rd";
3139 }
3140
3141 multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode, string asmop,
3142                              SDPatternOperator opnode, SDPatternOperator subop>{
3143   def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3144                              opnode, subop, VPR64, v8i16, v8i8, v8i8>;
3145   def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3146                              opnode, subop, VPR64, v4i32, v4i16, v4i16>;
3147   def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3148                              opnode, subop, VPR64, v2i64, v2i32, v2i32>;
3149 }
3150
3151 defm SABALvvv :  NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
3152                                    add, int_arm_neon_vabds>;
3153 defm UABALvvv :  NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
3154                                    add, int_arm_neon_vabdu>;
3155
3156 multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode, string asmop,
3157                               SDPatternOperator opnode, string subop> {
3158   def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3159                              opnode, !cast<PatFrag>(subop # "_16B"),
3160                              VPR128, v8i16, v16i8, v8i8>;
3161   def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3162                              opnode, !cast<PatFrag>(subop # "_8H"),
3163                              VPR128, v4i32, v8i16, v4i16>;
3164   def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3165                              opnode, !cast<PatFrag>(subop # "_4S"),
3166                              VPR128, v2i64, v4i32, v2i32>;
3167 }
3168
3169 defm SABAL2vvv :  NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
3170                                      "NI_sabdl_hi">;
3171 defm UABAL2vvv :  NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
3172                                      "NI_uabdl_hi">;
3173
3174 // Long pattern with 2 operands
3175 multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode, string asmop,
3176                           SDPatternOperator opnode, bit Commutable = 0> {
3177   let isCommutable = Commutable,
3178       SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
3179     def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3180                               opnode, VPR128, VPR64, v8i16, v8i8>;
3181     def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3182                               opnode, VPR128, VPR64, v4i32, v4i16>;
3183     def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3184                               opnode, VPR128, VPR64, v2i64, v2i32>;
3185   }
3186 }
3187
3188 defm SMULLvvv :  NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
3189 defm UMULLvvv :  NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
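// E.g. "smull v0.8h, v1.8b, v2.8b" multiplies corresponding signed bytes into
// 16-bit products; smull2/umull2 below use the high halves of 16b operands.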
3190
3191 class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
3192                            string asmop, string ResS, string OpS,
3193                            SDPatternOperator opnode,
3194                            ValueType ResTy, ValueType OpTy>
3195   : NeonI_3VDiff<q, u, size, opcode,
3196                  (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
3197                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3198                  [(set (ResTy VPR128:$Rd),
3199                     (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
3200                  NoItinerary>,
3201     Sched<[WriteFPMul, ReadFPMul, ReadFPMul]>;
3202
3203 multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode, string asmop,
3204                                    string opnode, bit Commutable = 0> {
3205   let isCommutable = Commutable in {
3206     def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3207                                       !cast<PatFrag>(opnode # "_16B"),
3208                                       v8i16, v16i8>;
3209     def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3210                                      !cast<PatFrag>(opnode # "_8H"),
3211                                      v4i32, v8i16>;
3212     def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3213                                      !cast<PatFrag>(opnode # "_4S"),
3214                                      v2i64, v4i32>;
3215   }
3216 }
3217
3218 defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
3219                                          "NI_smull_hi", 1>;
3220 defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
3221                                          "NI_umull_hi", 1>;
3222
3223 // Long pattern with 3 operands
3224 class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
3225                      string asmop, string ResS, string OpS,
3226                      SDPatternOperator opnode,
3227                      ValueType ResTy, ValueType OpTy>
3228   : NeonI_3VDiff<q, u, size, opcode,
3229                  (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
3230                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3231                  [(set (ResTy VPR128:$Rd),
3232                     (ResTy (opnode
3233                       (ResTy VPR128:$src),
3234                       (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
3235                NoItinerary>,
3236     Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
3237   let Constraints = "$src = $Rd";
3238 }
3239
3240 multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode, string asmop,
3241                              SDPatternOperator opnode> {
3242   def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3243                              opnode, v8i16, v8i8>;
3244   def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3245                              opnode, v4i32, v4i16>;
3246   def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3247                              opnode, v2i64, v2i32>;
3248 }
3249
3250 def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3251                          (add node:$Rd,
3252                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
3253
3254 def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3255                          (add node:$Rd,
3256                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
3257
3258 def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3259                          (sub node:$Rd,
3260                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
3261
3262 def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3263                          (sub node:$Rd,
3264                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
3265
3266 defm SMLALvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
3267 defm UMLALvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
3268
3269 defm SMLSLvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
3270 defm UMLSLvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
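// E.g. "smlal v0.4s, v1.4h, v2.4h" adds the widened 32-bit products of the
// 16-bit lanes to the accumulator in v0; smlsl subtracts them.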
3271
3272 class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
3273                            string asmop, string ResS, string OpS,
3274                            SDPatternOperator subop, SDPatternOperator opnode,
3275                            RegisterOperand OpVPR,
3276                            ValueType ResTy, ValueType OpTy>
3277   : NeonI_3VDiff<q, u, size, opcode,
3278                (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
3279                asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3280                [(set (ResTy VPR128:$Rd),
3281                   (ResTy (subop
3282                     (ResTy VPR128:$src),
3283                     (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
3284                NoItinerary>,
3285     Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
3286   let Constraints = "$src = $Rd";
3287 }
3288
3289 multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode, string asmop,
3290                                    SDPatternOperator subop, string opnode> {
3291   def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3292                                     subop, !cast<PatFrag>(opnode # "_16B"),
3293                                     VPR128, v8i16, v16i8>;
3294   def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3295                                    subop, !cast<PatFrag>(opnode # "_8H"),
3296                                    VPR128, v4i32, v8i16>;
3297   def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3298                                    subop, !cast<PatFrag>(opnode # "_4S"),
3299                                    VPR128, v2i64, v4i32>;
3300 }
3301
3302 defm SMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
3303                                           add, "NI_smull_hi">;
3304 defm UMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
3305                                           add, "NI_umull_hi">;
3306
3307 defm SMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
3308                                           sub, "NI_smull_hi">;
3309 defm UMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
3310                                           sub, "NI_umull_hi">;
3311
3312 multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode, string asmop,
3313                                     SDPatternOperator opnode> {
3314   def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3315                                    opnode, int_arm_neon_vqdmull,
3316                                    VPR64, v4i32, v4i16>;
3317   def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3318                                    opnode, int_arm_neon_vqdmull,
3319                                    VPR64, v2i64, v2i32>;
3320 }
3321
3322 defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
3323                                            int_arm_neon_vqadds>;
3324 defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
3325                                            int_arm_neon_vqsubs>;
3326
3327 multiclass NeonI_3VDL_v2<bit u, bits<4> opcode, string asmop,
3328                          SDPatternOperator opnode, bit Commutable = 0> {
3329   let isCommutable = Commutable in {
3330     def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3331                               opnode, VPR128, VPR64, v4i32, v4i16>;
3332     def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3333                               opnode, VPR128, VPR64, v2i64, v2i32>;
3334   }
3335 }
3336
3337 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
3338 defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
3339                                 int_arm_neon_vqdmull, 1>;
3340 }
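// sqdmull doubles the widened product and saturates, so for 16-bit inputs
// 0x8000 * 0x8000 saturates to 0x7fffffff instead of wrapping to 0x80000000.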
3341
3342 multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode, string asmop,
3343                                    string opnode, bit Commutable = 0> {
3344   let isCommutable = Commutable in {
3345     def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3346                                      !cast<PatFrag>(opnode # "_8H"),
3347                                      v4i32, v8i16>;
3348     def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3349                                      !cast<PatFrag>(opnode # "_4S"),
3350                                      v2i64, v4i32>;
3351   }
3352 }
3353
3354 defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2",
3355                                            "NI_qdmull_hi", 1>;
3356
3357 multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode, string asmop,
3358                                      SDPatternOperator opnode> {
3359   def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3360                                    opnode, NI_qdmull_hi_8H,
3361                                    VPR128, v4i32, v8i16>;
3362   def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3363                                    opnode, NI_qdmull_hi_4S,
3364                                    VPR128, v2i64, v4i32>;
3365 }
3366
3367 defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
3368                                              int_arm_neon_vqadds>;
3369 defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
3370                                              int_arm_neon_vqsubs>;
3371
3372 multiclass NeonI_3VDL_v3<bit u, bits<4> opcode, string asmop,
3373                          SDPatternOperator opnode_8h8b,
3374                          SDPatternOperator opnode_1q1d, bit Commutable = 0> {
3375   let isCommutable = Commutable in {
3376     def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3377                               opnode_8h8b, VPR128, VPR64, v8i16, v8i8>;
3378
3379     def _1q1d : NeonI_3VD_2Op<0b0, u, 0b11, opcode, asmop, "1q", "1d",
3380                               opnode_1q1d, VPR128, VPR64, v16i8, v1i64>;
3381   }
3382 }
3383
3384 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in
3385 defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp,
3386                               int_aarch64_neon_vmull_p64, 1>;
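// pmull is a carry-less (polynomial) multiply. The 1q/1d form above is the
// 64x64->128-bit variant (architecturally part of the Crypto extension),
// typically used for GHASH-style computations.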
3387
3388 multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode, string asmop,
3389                                    string opnode, bit Commutable = 0> {
3390   let isCommutable = Commutable in {
3391     def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3392                                       !cast<PatFrag>(opnode # "_16B"),
3393                                       v8i16, v16i8>;
3394
3395     def _1q2d : 
3396       NeonI_3VDiff<0b1, u, 0b11, opcode,
3397                    (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
3398                    asmop # "\t$Rd.1q, $Rn.2d, $Rm.2d",
3399                    [(set (v16i8 VPR128:$Rd),
3400                       (v16i8 (int_aarch64_neon_vmull_p64 
3401                         (v1i64 (scalar_to_vector
3402                           (i64 (vector_extract (v2i64 VPR128:$Rn), 1)))),
3403                         (v1i64 (scalar_to_vector
3404                           (i64 (vector_extract (v2i64 VPR128:$Rm), 1)))))))],
3405                    NoItinerary>,
3406       Sched<[WriteFPMul, ReadFPMul, ReadFPMul]>;
3407   }
3408
3409   def : Pat<(v16i8 (int_aarch64_neon_vmull_p64
3410                       (v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 1))),
3411                       (v1i64 (extract_subvector (v2i64 VPR128:$Rm), (i64 1))))),
3412             (!cast<Instruction>(NAME # "_1q2d") VPR128:$Rn, VPR128:$Rm)>;
3413 }
3414
3415 defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2", "NI_pmull_hi",
3416                                          1>;
3417
3418 // End of implementation for instruction class (3V Diff)
3419
3420 // The following are the vector load/store multiple N-element structure
3421 // instructions (class SIMD lselem).
3422
3423 // ld1:         load multiple 1-element structures to 1/2/3/4 registers.
3424 // ld2/ld3/ld4: load multiple N-element structures to N registers (N = 2, 3, 4).
3425 //              The structure consists of a sequence of sets of N values.
3426 //              The first element of the structure is placed in the first lane
3427 //              of the first vector, the second element in the first lane
3428 //              of the second vector, and so on.
3429 // E.g. LD1_3V_2S will load 32-bit elements {A, B, C, D, E, F} sequentially into
3430 // the list of three 64-bit vectors {BA, DC, FE}.
3431 // E.g. LD3_2S will load 32-bit elements {A, B, C, D, E, F} into the list of
3432 // three 64-bit vectors {DA, EB, FC}.
3433 // Store instructions store multiple structures from N registers like the loads.
3434
3435
3436 class NeonI_LDVList<bit q, bits<4> opcode, bits<2> size,
3437                     RegisterOperand VecList, string asmop>
3438   : NeonI_LdStMult<q, 1, opcode, size,
3439                  (outs VecList:$Rt), (ins GPR64xsp:$Rn),
3440                  asmop # "\t$Rt, [$Rn]",
3441                  [],
3442                  NoItinerary>,
3443     Sched<[WriteVecLd, ReadVecLd]> {
3444   let mayLoad = 1;
3445   let neverHasSideEffects = 1;
3446 }
3447
3448 multiclass LDVList_BHSD<bits<4> opcode, string List, string asmop> {
3449   def _8B : NeonI_LDVList<0, opcode, 0b00,
3450                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3451
3452   def _4H : NeonI_LDVList<0, opcode, 0b01,
3453                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3454
3455   def _2S : NeonI_LDVList<0, opcode, 0b10,
3456                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3457
3458   def _16B : NeonI_LDVList<1, opcode, 0b00,
3459                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3460
3461   def _8H : NeonI_LDVList<1, opcode, 0b01,
3462                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3463
3464   def _4S : NeonI_LDVList<1, opcode, 0b10,
3465                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3466
3467   def _2D : NeonI_LDVList<1, opcode, 0b11,
3468                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3469 }
3470
3471 // Load multiple N-element structures to N consecutive registers (N = 1,2,3,4)
3472 defm LD1 : LDVList_BHSD<0b0111, "VOne", "ld1">;
3473 def LD1_1D : NeonI_LDVList<0, 0b0111, 0b11, VOne1D_operand, "ld1">;
3474
3475 defm LD2 : LDVList_BHSD<0b1000, "VPair", "ld2">;
3476
3477 defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">;
3478
3479 defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
3480
3481 // Load multiple 1-element structure to N consecutive registers (N = 2,3,4)
3482 defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
3483 def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
3484
3485 defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
3486 def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
3487
3488 defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
3489 def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
3490
3491 class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
3492                     RegisterOperand VecList, string asmop>
3493   : NeonI_LdStMult<q, 0, opcode, size,
3494                  (outs), (ins GPR64xsp:$Rn, VecList:$Rt),
3495                  asmop # "\t$Rt, [$Rn]",
3496                  [],
3497                  NoItinerary>,
3498     Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
3499   let mayStore = 1;
3500   let neverHasSideEffects = 1;
3501 }
3502
3503 multiclass STVList_BHSD<bits<4> opcode, string List, string asmop> {
3504   def _8B : NeonI_STVList<0, opcode, 0b00,
3505                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3506
3507   def _4H : NeonI_STVList<0, opcode, 0b01,
3508                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3509
3510   def _2S : NeonI_STVList<0, opcode, 0b10,
3511                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3512
3513   def _16B : NeonI_STVList<1, opcode, 0b00,
3514                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3515
3516   def _8H : NeonI_STVList<1, opcode, 0b01,
3517                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3518
3519   def _4S : NeonI_STVList<1, opcode, 0b10,
3520                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3521
3522   def _2D : NeonI_STVList<1, opcode, 0b11,
3523                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3524 }
3525
3526 // Store multiple N-element structures from N registers (N = 1,2,3,4)
3527 defm ST1 : STVList_BHSD<0b0111, "VOne", "st1">;
3528 def ST1_1D : NeonI_STVList<0, 0b0111, 0b11, VOne1D_operand, "st1">;
3529
3530 defm ST2 : STVList_BHSD<0b1000, "VPair", "st2">;
3531
3532 defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">;
3533
3534 defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
3535
3536 // Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
3537 defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
3538 def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
3539
3540 defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
3541 def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
3542
3543 defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
3544 def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
3545
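// The patterns below select ordinary vector loads/stores to whole-register
// LD1/ST1, e.g. a v4i32 load should become "ld1 {v0.4s}, [x0]".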
3546 def : Pat<(v2f64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
3547 def : Pat<(v2i64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
3548
3549 def : Pat<(v4f32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
3550 def : Pat<(v4i32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
3551
3552 def : Pat<(v8i16 (load GPR64xsp:$addr)), (LD1_8H GPR64xsp:$addr)>;
3553 def : Pat<(v16i8 (load GPR64xsp:$addr)), (LD1_16B GPR64xsp:$addr)>;
3554
3555 def : Pat<(v1f64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
3556 def : Pat<(v1i64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
3557
3558 def : Pat<(v2f32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
3559 def : Pat<(v2i32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
3560
3561 def : Pat<(v4i16 (load GPR64xsp:$addr)), (LD1_4H GPR64xsp:$addr)>;
3562 def : Pat<(v8i8 (load GPR64xsp:$addr)), (LD1_8B GPR64xsp:$addr)>;
3563
3564 def : Pat<(store (v2i64 VPR128:$value), GPR64xsp:$addr),
3565           (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
3566 def : Pat<(store (v2f64 VPR128:$value), GPR64xsp:$addr),
3567           (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
3568
3569 def : Pat<(store (v4i32 VPR128:$value), GPR64xsp:$addr),
3570           (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
3571 def : Pat<(store (v4f32 VPR128:$value), GPR64xsp:$addr),
3572           (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
3573
3574 def : Pat<(store (v8i16 VPR128:$value), GPR64xsp:$addr),
3575           (ST1_8H GPR64xsp:$addr, VPR128:$value)>;
3576 def : Pat<(store (v16i8 VPR128:$value), GPR64xsp:$addr),
3577           (ST1_16B GPR64xsp:$addr, VPR128:$value)>;
3578
3579 def : Pat<(store (v1i64 VPR64:$value), GPR64xsp:$addr),
3580           (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
3581 def : Pat<(store (v1f64 VPR64:$value), GPR64xsp:$addr),
3582           (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
3583
3584 def : Pat<(store (v2i32 VPR64:$value), GPR64xsp:$addr),
3585           (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
3586 def : Pat<(store (v2f32 VPR64:$value), GPR64xsp:$addr),
3587           (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
3588
3589 def : Pat<(store (v4i16 VPR64:$value), GPR64xsp:$addr),
3590           (ST1_4H GPR64xsp:$addr, VPR64:$value)>;
3591 def : Pat<(store (v8i8 VPR64:$value), GPR64xsp:$addr),
3592           (ST1_8B GPR64xsp:$addr, VPR64:$value)>;
3593
3594 // Match load/store of v1i8/v1i16/v1i32 type to FPR8/FPR16/FPR32 load/store.
3595 // FIXME: v1i8, v1i16 and v1i32 are legal types for now; once they are made
3596 // illegal, these patterns will no longer be needed.
3597 def : Pat<(v1i8 (load GPR64xsp:$addr)), (LSFP8_LDR $addr, 0)>;
3598 def : Pat<(v1i16 (load GPR64xsp:$addr)), (LSFP16_LDR $addr, 0)>;
3599 def : Pat<(v1i32 (load GPR64xsp:$addr)), (LSFP32_LDR $addr, 0)>;
3600
3601 def : Pat<(store (v1i8 FPR8:$value), GPR64xsp:$addr),
3602           (LSFP8_STR $value, $addr, 0)>;
3603 def : Pat<(store (v1i16 FPR16:$value), GPR64xsp:$addr),
3604           (LSFP16_STR $value, $addr, 0)>;
3605 def : Pat<(store (v1i32 FPR32:$value), GPR64xsp:$addr),
3606           (LSFP32_STR $value, $addr, 0)>;
3607
3608
3609 // End of vector load/store multiple N-element structure (class SIMD lselem)
3610
3611 // The following are the post-index vector load/store multiple N-element
3612 // structure instructions (class SIMD lselem-post).
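// A post-indexed form with an immediate offset requires that offset to equal
// the total number of bytes transferred, so each operand below accepts exactly
// one value (e.g. "ld1 {v0.16b}, [x0], #16" uses uimm_exact16).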
3613 def exact1_asmoperand : AsmOperandClass {
3614   let Name = "Exact1";
3615   let PredicateMethod = "isExactImm<1>";
3616   let RenderMethod = "addImmOperands";
3617 }
3618 def uimm_exact1 : Operand<i32>, ImmLeaf<i32, [{return Imm == 1;}]> {
3619   let ParserMatchClass = exact1_asmoperand;
3620 }
3621
3622 def exact2_asmoperand : AsmOperandClass {
3623   let Name = "Exact2";
3624   let PredicateMethod = "isExactImm<2>";
3625   let RenderMethod = "addImmOperands";
3626 }
3627 def uimm_exact2 : Operand<i32>, ImmLeaf<i32, [{return Imm == 2;}]> {
3628   let ParserMatchClass = exact2_asmoperand;
3629 }
3630
3631 def exact3_asmoperand : AsmOperandClass {
3632   let Name = "Exact3";
3633   let PredicateMethod = "isExactImm<3>";
3634   let RenderMethod = "addImmOperands";
3635 }
3636 def uimm_exact3 : Operand<i32>, ImmLeaf<i32, [{return Imm == 3;}]> {
3637   let ParserMatchClass = exact3_asmoperand;
3638 }
3639
3640 def exact4_asmoperand : AsmOperandClass {
3641   let Name = "Exact4";
3642   let PredicateMethod = "isExactImm<4>";
3643   let RenderMethod = "addImmOperands";
3644 }
3645 def uimm_exact4 : Operand<i32>, ImmLeaf<i32, [{return Imm == 4;}]> {
3646   let ParserMatchClass = exact4_asmoperand;
3647 }
3648
3649 def exact6_asmoperand : AsmOperandClass {
3650   let Name = "Exact6";
3651   let PredicateMethod = "isExactImm<6>";
3652   let RenderMethod = "addImmOperands";
3653 }
3654 def uimm_exact6 : Operand<i32>, ImmLeaf<i32, [{return Imm == 6;}]> {
3655   let ParserMatchClass = exact6_asmoperand;
3656 }
3657
3658 def exact8_asmoperand : AsmOperandClass {
3659   let Name = "Exact8";
3660   let PredicateMethod = "isExactImm<8>";
3661   let RenderMethod = "addImmOperands";
3662 }
3663 def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
3664   let ParserMatchClass = exact8_asmoperand;
3665 }
3666
3667 def exact12_asmoperand : AsmOperandClass {
3668   let Name = "Exact12";
3669   let PredicateMethod = "isExactImm<12>";
3670   let RenderMethod = "addImmOperands";
3671 }
3672 def uimm_exact12 : Operand<i32>, ImmLeaf<i32, [{return Imm == 12;}]> {
3673   let ParserMatchClass = exact12_asmoperand;
3674 }
3675
3676 def exact16_asmoperand : AsmOperandClass {
3677   let Name = "Exact16";
3678   let PredicateMethod = "isExactImm<16>";
3679   let RenderMethod = "addImmOperands";
3680 }
3681 def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
3682   let ParserMatchClass = exact16_asmoperand;
3683 }
3684
3685 def exact24_asmoperand : AsmOperandClass {
3686   let Name = "Exact24";
3687   let PredicateMethod = "isExactImm<24>";
3688   let RenderMethod = "addImmOperands";
3689 }
3690 def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
3691   let ParserMatchClass = exact24_asmoperand;
3692 }
3693
3694 def exact32_asmoperand : AsmOperandClass {
3695   let Name = "Exact32";
3696   let PredicateMethod = "isExactImm<32>";
3697   let RenderMethod = "addImmOperands";
3698 }
3699 def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
3700   let ParserMatchClass = exact32_asmoperand;
3701 }
3702
3703 def exact48_asmoperand : AsmOperandClass {
3704   let Name = "Exact48";
3705   let PredicateMethod = "isExactImm<48>";
3706   let RenderMethod = "addImmOperands";
3707 }
3708 def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
3709   let ParserMatchClass = exact48_asmoperand;
3710 }
3711
3712 def exact64_asmoperand : AsmOperandClass {
3713   let Name = "Exact64";
3714   let PredicateMethod = "isExactImm<64>";
3715   let RenderMethod = "addImmOperands";
3716 }
3717 def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
3718   let ParserMatchClass = exact64_asmoperand;
3719 }
3720
3721 multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
3722                            RegisterOperand VecList, Operand ImmTy,
3723                            string asmop> {
3724   let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1,
3725       DecoderMethod = "DecodeVLDSTPostInstruction" in {
3726     def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
3727                      (outs VecList:$Rt, GPR64xsp:$wb),
3728                      (ins GPR64xsp:$Rn, ImmTy:$amt),
3729                      asmop # "\t$Rt, [$Rn], $amt",
3730                      [],
3731                      NoItinerary>,
3732                  Sched<[WriteVecLd, WriteVecLd, ReadVecLd]> {
3733       let Rm = 0b11111;
3734     }
3735
3736     def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
3737                         (outs VecList:$Rt, GPR64xsp:$wb),
3738                         (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
3739                         asmop # "\t$Rt, [$Rn], $Rm",
3740                         [],
3741                         NoItinerary>,
3742                     Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]>;
3743   }
3744 }
3745
3746 multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
3747                            Operand ImmTy2, string asmop> {
3748   defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
3749                               !cast<RegisterOperand>(List # "8B_operand"),
3750                               ImmTy, asmop>;
3751
3752   defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
3753                               !cast<RegisterOperand>(List # "4H_operand"),
3754                               ImmTy, asmop>;
3755
3756   defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
3757                               !cast<RegisterOperand>(List # "2S_operand"),
3758                               ImmTy, asmop>;
3759
3760   defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
3761                                !cast<RegisterOperand>(List # "16B_operand"),
3762                                ImmTy2, asmop>;
3763
3764   defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
3765                               !cast<RegisterOperand>(List # "8H_operand"),
3766                               ImmTy2, asmop>;
3767
3768   defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
3769                               !cast<RegisterOperand>(List # "4S_operand"),
3770                               ImmTy2, asmop>;
3771
3772   defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
3773                               !cast<RegisterOperand>(List # "2D_operand"),
3774                               ImmTy2, asmop>;
3775 }
3776
3777 // Post-index load multiple N-element structures from N registers (N = 1,2,3,4)
3778 defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
3779 defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
3780                                  "ld1">;
3781
3782 defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "ld2">;
3783
3784 defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
3785                              "ld3">;
3786
3787 defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">;
3788
3789 // Post-index load multiple 1-element structures from N consecutive registers
3790 // (N = 2,3,4)
3791 defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
3792                                "ld1">;
3793 defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
3794                                    uimm_exact16, "ld1">;
3795
3796 defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
3797                                "ld1">;
3798 defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
3799                                    uimm_exact24, "ld1">;
3800
3801 defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
3802                                 "ld1">;
3803 defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
3804                                    uimm_exact32, "ld1">;
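// For example, the post-index load forms above are expected to match assembly
// such as
//   ld1 {v0.16b}, [x0], #16        (fixed offset = bytes transferred)
//   ld2 {v0.4s, v1.4s}, [x0], x1   (register post-index; Rm may not be xzr)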
3805
3806 multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
3807                             RegisterOperand VecList, Operand ImmTy,
3808                             string asmop> {
3809   let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
3810       DecoderMethod = "DecodeVLDSTPostInstruction" in {
3811     def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
3812                      (outs GPR64xsp:$wb),
3813                      (ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
3814                      asmop # "\t$Rt, [$Rn], $amt",
3815                      [],
3816                      NoItinerary>,
3817                  Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
3818       let Rm = 0b11111;
3819     }
3820
3821     def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
3822                       (outs GPR64xsp:$wb),
3823                       (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt),
3824                       asmop # "\t$Rt, [$Rn], $Rm",
3825                       [],
3826                       NoItinerary>,
3827                     Sched<[WriteVecSt, ReadVecSt, ReadVecSt, ReadVecSt]>;
3828   }
3829 }
3830
3831 multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
3832                            Operand ImmTy2, string asmop> {
3833   defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
3834                  !cast<RegisterOperand>(List # "8B_operand"), ImmTy, asmop>;
3835
3836   defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
3837                               !cast<RegisterOperand>(List # "4H_operand"),
3838                               ImmTy, asmop>;
3839
3840   defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
3841                               !cast<RegisterOperand>(List # "2S_operand"),
3842                               ImmTy, asmop>;
3843
3844   defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
3845                                !cast<RegisterOperand>(List # "16B_operand"),
3846                                ImmTy2, asmop>;
3847
3848   defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
3849                               !cast<RegisterOperand>(List # "8H_operand"),
3850                               ImmTy2, asmop>;
3851
3852   defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
3853                               !cast<RegisterOperand>(List # "4S_operand"),
3854                               ImmTy2, asmop>;
3855
3856   defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
3857                               !cast<RegisterOperand>(List # "2D_operand"),
3858                               ImmTy2, asmop>;
3859 }
3860
3861 // Post-index store multiple N-element structures from N registers (N = 1,2,3,4)
3862 defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
3863 defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
3864                                  "st1">;
3865
3866 defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "st2">;
3867
3868 defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
3869                              "st3">;
3870
3871 defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">;
3872
3873 // Post-index store multiple 1-element structures from N consecutive registers
3874 // (N = 2,3,4)
3875 defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
3876                                "st1">;
3877 defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
3878                                    uimm_exact16, "st1">;
3879
3880 defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
3881                                "st1">;
3882 defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
3883                                    uimm_exact24, "st1">;
3884
3885 defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
3886                                "st1">;
3887 defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
3888                                    uimm_exact32, "st1">;
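// For example, the post-index store forms above should correspond to
//   st1 {v0.8b}, [x0], #8
//   st4 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0], #64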
3889
3890 // End of post-index vector load/store multiple N-element structure
3891 // (class SIMD lselem-post)
3892
3893 // The following are the vector load/store single N-element structure
3894 // instructions (class SIMD lsone).
3895 def neon_uimm0_bare : Operand<i64>,
3896                         ImmLeaf<i64, [{return Imm == 0;}]> {
3897   let ParserMatchClass = neon_uimm0_asmoperand;
3898   let PrintMethod = "printUImmBareOperand";
3899 }
3900
3901 def neon_uimm1_bare : Operand<i64>,
3902                         ImmLeaf<i64, [{return Imm < 2;}]> {
3903   let ParserMatchClass = neon_uimm1_asmoperand;
3904   let PrintMethod = "printUImmBareOperand";
3905 }
3906
3907 def neon_uimm2_bare : Operand<i64>,
3908                         ImmLeaf<i64, [{return Imm < 4;}]> {
3909   let ParserMatchClass = neon_uimm2_asmoperand;
3910   let PrintMethod = "printUImmBareOperand";
3911 }
3912
3913 def neon_uimm3_bare : Operand<i64>,
3914                         ImmLeaf<i64, [{return Imm < 8;}]> {
3915   let ParserMatchClass = uimm3_asmoperand;
3916   let PrintMethod = "printUImmBareOperand";
3917 }
3918
3919 def neon_uimm4_bare : Operand<i64>,
3920                         ImmLeaf<i64, [{return Imm < 16;}]> {
3921   let ParserMatchClass = uimm4_asmoperand;
3922   let PrintMethod = "printUImmBareOperand";
3923 }
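// These "bare" immediates are used as lane indices below; the print method is
// expected to emit the value without a '#' prefix so it fits the "[lane]"
// syntax.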
3924
3925 class NeonI_LDN_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
3926                     RegisterOperand VecList, string asmop>
3927     : NeonI_LdOne_Dup<q, r, opcode, size,
3928                       (outs VecList:$Rt), (ins GPR64xsp:$Rn),
3929                       asmop # "\t$Rt, [$Rn]",
3930                       [],
3931                       NoItinerary>,
3932       Sched<[WriteVecLd, ReadVecLd]> {
3933   let mayLoad = 1;
3934   let neverHasSideEffects = 1;
3935 }
3936
3937 multiclass LDN_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop> {
3938   def _8B : NeonI_LDN_Dup<0, r, opcode, 0b00,
3939                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3940
3941   def _4H : NeonI_LDN_Dup<0, r, opcode, 0b01,
3942                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3943
3944   def _2S : NeonI_LDN_Dup<0, r, opcode, 0b10,
3945                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3946
3947   def _1D : NeonI_LDN_Dup<0, r, opcode, 0b11,
3948                           !cast<RegisterOperand>(List # "1D_operand"), asmop>;
3949
3950   def _16B : NeonI_LDN_Dup<1, r, opcode, 0b00,
3951                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3952
3953   def _8H : NeonI_LDN_Dup<1, r, opcode, 0b01,
3954                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3955
3956   def _4S : NeonI_LDN_Dup<1, r, opcode, 0b10,
3957                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3958
3959   def _2D : NeonI_LDN_Dup<1, r, opcode, 0b11,
3960                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3961 }
3962
3963 // Load single 1-element structure to all lanes of 1 register
3964 defm LD1R : LDN_Dup_BHSD<0b0, 0b110, "VOne", "ld1r">;
3965
3966 // Load single N-element structure to all lanes of N consecutive
3967 // registers (N = 2,3,4)
3968 defm LD2R : LDN_Dup_BHSD<0b1, 0b110, "VPair", "ld2r">;
3969 defm LD3R : LDN_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r">;
3970 defm LD4R : LDN_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r">;
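// E.g. "ld1r {v0.4s}, [x0]" loads one 32-bit element and replicates it into
// all four lanes; ld2r/ld3r/ld4r do the same across 2/3/4 consecutive
// registers.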
3971
3972
3973 class LD1R_pattern <ValueType VTy, ValueType DTy, PatFrag LoadOp,
3974                     Instruction INST>
3975     : Pat<(VTy (Neon_vdup (DTy (LoadOp GPR64xsp:$Rn)))),
3976           (VTy (INST GPR64xsp:$Rn))>;
3977
3978 // Match all LD1R instructions
3979 def : LD1R_pattern<v8i8, i32, extloadi8, LD1R_8B>;
3980
3981 def : LD1R_pattern<v16i8, i32, extloadi8, LD1R_16B>;
3982
3983 def : LD1R_pattern<v4i16, i32, extloadi16, LD1R_4H>;
3984
3985 def : LD1R_pattern<v8i16, i32, extloadi16, LD1R_8H>;
3986
3987 def : LD1R_pattern<v2i32, i32, load, LD1R_2S>;
3988 def : LD1R_pattern<v2f32, f32, load, LD1R_2S>;
3989
3990 def : LD1R_pattern<v4i32, i32, load, LD1R_4S>;
3991 def : LD1R_pattern<v4f32, f32, load, LD1R_4S>;
3992
3993 def : LD1R_pattern<v2i64, i64, load, LD1R_2D>;
3994 def : LD1R_pattern<v2f64, f64, load, LD1R_2D>;
3995
3996 class LD1R_pattern_v1 <ValueType VTy, ValueType DTy, PatFrag LoadOp,
3997                        Instruction INST>
3998   : Pat<(VTy (scalar_to_vector (DTy (LoadOp GPR64xsp:$Rn)))),
3999         (VTy (INST GPR64xsp:$Rn))>;
4000
4001 def : LD1R_pattern_v1<v1i64, i64, load, LD1R_1D>;
4002 def : LD1R_pattern_v1<v1f64, f64, load, LD1R_1D>;
4003
4004 multiclass VectorList_Bare_BHSD<string PREFIX, int Count,
4005                                 RegisterClass RegList> {
4006   defm B : VectorList_operands<PREFIX, "B", Count, RegList>;
4007   defm H : VectorList_operands<PREFIX, "H", Count, RegList>;
4008   defm S : VectorList_operands<PREFIX, "S", Count, RegList>;
4009   defm D : VectorList_operands<PREFIX, "D", Count, RegList>;
4010 }
4011
4012 // Special vector list operands of 128-bit vectors with a bare layout,
4013 // i.e. only ".b", ".h", ".s" and ".d" appear in the assembly string.
4014 defm VOne : VectorList_Bare_BHSD<"VOne", 1, FPR128>;
4015 defm VPair : VectorList_Bare_BHSD<"VPair", 2, QPair>;
4016 defm VTriple : VectorList_Bare_BHSD<"VTriple", 3, QTriple>;
4017 defm VQuad : VectorList_Bare_BHSD<"VQuad", 4, QQuad>;
4018
4019 class NeonI_LDN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4020                      Operand ImmOp, string asmop>
4021     : NeonI_LdStOne_Lane<1, r, op2_1, op0,
4022                          (outs VList:$Rt),
4023                          (ins GPR64xsp:$Rn, VList:$src, ImmOp:$lane),
4024                          asmop # "\t$Rt[$lane], [$Rn]",
4025                          [],
4026                          NoItinerary>,
4027       Sched<[WriteVecLd, ReadVecLd, ReadVecLd]> {
4028   let mayLoad = 1;
4029   let neverHasSideEffects = 1;
4030   let hasExtraDefRegAllocReq = 1;
4031   let Constraints = "$src = $Rt";
4032 }
4033
4034 multiclass LDN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
4035   def _B : NeonI_LDN_Lane<r, 0b00, op0,
4036                           !cast<RegisterOperand>(List # "B_operand"),
4037                           neon_uimm4_bare, asmop> {
4038     let Inst{12-10} = lane{2-0};
4039     let Inst{30} = lane{3};
4040   }
4041
4042   def _H : NeonI_LDN_Lane<r, 0b01, op0,
4043                           !cast<RegisterOperand>(List # "H_operand"),
4044                           neon_uimm3_bare, asmop> {
4045     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4046     let Inst{30} = lane{2};
4047   }
4048
4049   def _S : NeonI_LDN_Lane<r, 0b10, op0,
4050                           !cast<RegisterOperand>(List # "S_operand"),
4051                           neon_uimm2_bare, asmop> {
4052     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4053     let Inst{30} = lane{1};
4054   }
4055
4056   def _D : NeonI_LDN_Lane<r, 0b10, op0,
4057                           !cast<RegisterOperand>(List # "D_operand"),
4058                           neon_uimm1_bare, asmop> {
4059     let Inst{12-10} = 0b001;
4060     let Inst{30} = lane{0};
4061   }
4062 }
4063
4064 // Load single 1-element structure to one lane of 1 register.
4065 defm LD1LN : LDN_Lane_BHSD<0b0, 0b0, "VOne", "ld1">;
4066
4067 // Load single N-element structure to one lane of N consecutive registers
4068 // (N = 2,3,4)
4069 defm LD2LN : LDN_Lane_BHSD<0b1, 0b0, "VPair", "ld2">;
4070 defm LD3LN : LDN_Lane_BHSD<0b0, 0b1, "VTriple", "ld3">;
4071 defm LD4LN : LDN_Lane_BHSD<0b1, 0b1, "VQuad", "ld4">;
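// E.g. "ld2 {v0.s, v1.s}[1], [x0]" should load two 32-bit values into lane 1
// of v0 and v1, leaving the other lanes untouched.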
4072
4073 multiclass LD1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
4074                           Operand ImmOp, Operand ImmOp2, PatFrag LoadOp,
4075                           Instruction INST> {
4076   def : Pat<(VTy (vector_insert (VTy VPR64:$src),
4077                      (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp:$lane))),
4078             (VTy (EXTRACT_SUBREG
4079                      (INST GPR64xsp:$Rn,
4080                            (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
4081                            ImmOp:$lane),
4082                      sub_64))>;
4083
4084   def : Pat<(VTy2 (vector_insert (VTy2 VPR128:$src),
4085                       (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp2:$lane))),
4086             (VTy2 (INST GPR64xsp:$Rn, VPR128:$src, ImmOp2:$lane))>;
4087 }
4088
4089 // Match all LD1LN instructions
4090 defm : LD1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
4091                       extloadi8, LD1LN_B>;
4092
4093 defm : LD1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
4094                       extloadi16, LD1LN_H>;
4095
4096 defm : LD1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
4097                       load, LD1LN_S>;
4098 defm : LD1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
4099                       load, LD1LN_S>;
4100
4101 defm : LD1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
4102                       load, LD1LN_D>;
4103 defm : LD1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
4104                       load, LD1LN_D>;
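// Note that the 64-bit-vector patterns above first widen $src to a 128-bit
// register with SUBREG_TO_REG, since the lane instructions operate on the full
// register, and then extract the low half of the result with EXTRACT_SUBREG.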
4105
4106 class NeonI_STN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4107                      Operand ImmOp, string asmop>
4108     : NeonI_LdStOne_Lane<0, r, op2_1, op0,
4109                          (outs), (ins GPR64xsp:$Rn, VList:$Rt, ImmOp:$lane),
4110                          asmop # "\t$Rt[$lane], [$Rn]",
4111                          [],
4112                          NoItinerary>,
4113       Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
4114   let mayStore = 1;
4115   let neverHasSideEffects = 1;
4116   let hasExtraDefRegAllocReq = 1;
4117 }
4118
4119 multiclass STN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
4120   def _B : NeonI_STN_Lane<r, 0b00, op0,
4121                           !cast<RegisterOperand>(List # "B_operand"),
4122                           neon_uimm4_bare, asmop> {
4123     let Inst{12-10} = lane{2-0};
4124     let Inst{30} = lane{3};
4125   }
4126
4127   def _H : NeonI_STN_Lane<r, 0b01, op0,
4128                           !cast<RegisterOperand>(List # "H_operand"),
4129                           neon_uimm3_bare, asmop> {
4130     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4131     let Inst{30} = lane{2};
4132   }
4133
4134   def _S : NeonI_STN_Lane<r, 0b10, op0,
4135                           !cast<RegisterOperand>(List # "S_operand"),
4136                            neon_uimm2_bare, asmop> {
4137     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4138     let Inst{30} = lane{1};
4139   }
4140
4141   def _D : NeonI_STN_Lane<r, 0b10, op0,
4142                           !cast<RegisterOperand>(List # "D_operand"),
4143                           neon_uimm1_bare, asmop> {
4144     let Inst{12-10} = 0b001;
4145     let Inst{30} = lane{0};
4146   }
4147 }
4148
4149 // Store single 1-element structure from one lane of 1 register.
4150 defm ST1LN : STN_Lane_BHSD<0b0, 0b0, "VOne", "st1">;
4151
4152 // Store single N-element structure from one lane of N consecutive registers
4153 // (N = 2,3,4)
4154 defm ST2LN : STN_Lane_BHSD<0b1, 0b0, "VPair", "st2">;
4155 defm ST3LN : STN_Lane_BHSD<0b0, 0b1, "VTriple", "st3">;
4156 defm ST4LN : STN_Lane_BHSD<0b1, 0b1, "VQuad", "st4">;
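// E.g. "st1 {v0.d}[1], [x0]" should store the upper 64-bit lane of v0.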
4157
4158 multiclass ST1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
4159                           Operand ImmOp, Operand ImmOp2, PatFrag StoreOp,
4160                           Instruction INST> {
4161   def : Pat<(StoreOp (DTy (vector_extract (VTy VPR64:$Rt), ImmOp:$lane)),
4162                      GPR64xsp:$Rn),
4163             (INST GPR64xsp:$Rn,
4164                   (SUBREG_TO_REG (i64 0), VPR64:$Rt, sub_64),
4165                   ImmOp:$lane)>;
4166
4167   def : Pat<(StoreOp (DTy (vector_extract (VTy2 VPR128:$Rt), ImmOp2:$lane)),
4168                      GPR64xsp:$Rn),
4169             (INST GPR64xsp:$Rn, VPR128:$Rt, ImmOp2:$lane)>;
4170 }
4171
4172 // Match all ST1LN instructions
4173 defm : ST1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
4174                       truncstorei8, ST1LN_B>;
4175
4176 defm : ST1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
4177                       truncstorei16, ST1LN_H>;
4178
4179 defm : ST1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
4180                       store, ST1LN_S>;
4181 defm : ST1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
4182                       store, ST1LN_S>;
4183
4184 defm : ST1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
4185                       store, ST1LN_D>;
4186 defm : ST1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
4187                       store, ST1LN_D>;
4188
4189 // End of vector load/store single N-element structure (class SIMD lsone).
4190
4191
4192 // The following are post-index load/store single N-element instructions
4193 // (class SIMD lsone-post)
4194
4195 multiclass NeonI_LDN_WB_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
4196                             RegisterOperand VecList, Operand ImmTy,
4197                             string asmop> {
4198   let mayLoad = 1, neverHasSideEffects = 1, Constraints = "$wb = $Rn",
4199   DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
4200     def _fixed : NeonI_LdOne_Dup_Post<q, r, opcode, size,
4201                       (outs VecList:$Rt, GPR64xsp:$wb),
4202                       (ins GPR64xsp:$Rn, ImmTy:$amt),
4203                       asmop # "\t$Rt, [$Rn], $amt",
4204                       [],
4205                       NoItinerary>,
4206                  Sched<[WriteVecLd, WriteVecLd, ReadVecLd]> {
4207       let Rm = 0b11111;
4208     }
4209
4210     def _register : NeonI_LdOne_Dup_Post<q, r, opcode, size,
4211                       (outs VecList:$Rt, GPR64xsp:$wb),
4212                       (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
4213                       asmop # "\t$Rt, [$Rn], $Rm",
4214                       [],
4215                       NoItinerary>,
4216                     Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]>;
4217   }
4218 }
4219
4220 multiclass LDWB_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop,
4221                          Operand uimm_b, Operand uimm_h,
4222                          Operand uimm_s, Operand uimm_d> {
4223   defm _8B : NeonI_LDN_WB_Dup<0, r, opcode, 0b00,
4224                               !cast<RegisterOperand>(List # "8B_operand"),
4225                               uimm_b, asmop>;
4226
4227   defm _4H : NeonI_LDN_WB_Dup<0, r, opcode, 0b01,
4228                               !cast<RegisterOperand>(List # "4H_operand"),
4229                               uimm_h, asmop>;
4230
4231   defm _2S : NeonI_LDN_WB_Dup<0, r, opcode, 0b10,
4232                               !cast<RegisterOperand>(List # "2S_operand"),
4233                               uimm_s, asmop>;
4234
4235   defm _1D : NeonI_LDN_WB_Dup<0, r, opcode, 0b11,
4236                               !cast<RegisterOperand>(List # "1D_operand"),
4237                               uimm_d, asmop>;
4238
4239   defm _16B : NeonI_LDN_WB_Dup<1, r, opcode, 0b00,
4240                                !cast<RegisterOperand>(List # "16B_operand"),
4241                                uimm_b, asmop>;
4242
4243   defm _8H : NeonI_LDN_WB_Dup<1, r, opcode, 0b01,
4244                               !cast<RegisterOperand>(List # "8H_operand"),
4245                               uimm_h, asmop>;
4246
4247   defm _4S : NeonI_LDN_WB_Dup<1, r, opcode, 0b10,
4248                               !cast<RegisterOperand>(List # "4S_operand"),
4249                               uimm_s, asmop>;
4250
4251   defm _2D : NeonI_LDN_WB_Dup<1, r, opcode, 0b11,
4252                               !cast<RegisterOperand>(List # "2D_operand"),
4253                               uimm_d, asmop>;
4254 }
4255
4256 // Post-index load single 1-element structure to all lanes of 1 register
4257 defm LD1R_WB : LDWB_Dup_BHSD<0b0, 0b110, "VOne", "ld1r", uimm_exact1,
4258                              uimm_exact2, uimm_exact4, uimm_exact8>;
4259
4260 // Post-index load single N-element structure to all lanes of N consecutive
4261 // registers (N = 2,3,4)
4262 defm LD2R_WB : LDWB_Dup_BHSD<0b1, 0b110, "VPair", "ld2r", uimm_exact2,
4263                              uimm_exact4, uimm_exact8, uimm_exact16>;
4264 defm LD3R_WB : LDWB_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r", uimm_exact3,
4265                              uimm_exact6, uimm_exact12, uimm_exact24>;
4266 defm LD4R_WB : LDWB_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r", uimm_exact4,
4267                              uimm_exact8, uimm_exact16, uimm_exact32>;
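// E.g. "ld1r {v0.8b}, [x0], #1" (one byte replicated, so the fixed offset is
// #1) or "ld4r {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #32".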
4268
4269 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1,
4270     Constraints = "$Rn = $wb, $Rt = $src",
4271     DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
4272   class LDN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4273                                 Operand ImmTy, Operand ImmOp, string asmop>
4274       : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
4275                                 (outs VList:$Rt, GPR64xsp:$wb),
4276                                 (ins GPR64xsp:$Rn, ImmTy:$amt,
4277                                     VList:$src, ImmOp:$lane),
4278                                 asmop # "\t$Rt[$lane], [$Rn], $amt",
4279                                 [],
4280                                 NoItinerary>,
4281         Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]> {
4282     let Rm = 0b11111;
4283   }
4284
4285   class LDN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4286                                  Operand ImmTy, Operand ImmOp, string asmop>
4287       : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
4288                                 (outs VList:$Rt, GPR64xsp:$wb),
4289                                 (ins GPR64xsp:$Rn, GPR64noxzr:$Rm,
4290                                     VList:$src, ImmOp:$lane),
4291                                 asmop # "\t$Rt[$lane], [$Rn], $Rm",
4292                                 [],
4293                                 NoItinerary>,
4294         Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd, ReadVecLd]>;
4295 }
4296
4297 multiclass LD_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
4298                            Operand uimm_b, Operand uimm_h,
4299                            Operand uimm_s, Operand uimm_d> {
4300   def _B_fixed : LDN_WBFx_Lane<r, 0b00, op0,
4301                                !cast<RegisterOperand>(List # "B_operand"),
4302                                uimm_b, neon_uimm4_bare, asmop> {
4303     let Inst{12-10} = lane{2-0};
4304     let Inst{30} = lane{3};
4305   }
4306
4307   def _B_register : LDN_WBReg_Lane<r, 0b00, op0,
4308                                    !cast<RegisterOperand>(List # "B_operand"),
4309                                    uimm_b, neon_uimm4_bare, asmop> {
4310     let Inst{12-10} = lane{2-0};
4311     let Inst{30} = lane{3};
4312   }
4313
4314   def _H_fixed : LDN_WBFx_Lane<r, 0b01, op0,
4315                                !cast<RegisterOperand>(List # "H_operand"),
4316                                uimm_h, neon_uimm3_bare, asmop> {
4317     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4318     let Inst{30} = lane{2};
4319   }
4320
4321   def _H_register : LDN_WBReg_Lane<r, 0b01, op0,
4322                                    !cast<RegisterOperand>(List # "H_operand"),
4323                                    uimm_h, neon_uimm3_bare, asmop> {
4324     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4325     let Inst{30} = lane{2};
4326   }
4327
4328   def _S_fixed : LDN_WBFx_Lane<r, 0b10, op0,
4329                                !cast<RegisterOperand>(List # "S_operand"),
4330                                uimm_s, neon_uimm2_bare, asmop> {
4331     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4332     let Inst{30} = lane{1};
4333   }
4334
4335   def _S_register : LDN_WBReg_Lane<r, 0b10, op0,
4336                                    !cast<RegisterOperand>(List # "S_operand"),
4337                                    uimm_s, neon_uimm2_bare, asmop> {
4338     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4339     let Inst{30} = lane{1};
4340   }
4341
4342   def _D_fixed : LDN_WBFx_Lane<r, 0b10, op0,
4343                                !cast<RegisterOperand>(List # "D_operand"),
4344                                uimm_d, neon_uimm1_bare, asmop> {
4345     let Inst{12-10} = 0b001;
4346     let Inst{30} = lane{0};
4347   }
4348
4349   def _D_register : LDN_WBReg_Lane<r, 0b10, op0,
4350                                    !cast<RegisterOperand>(List # "D_operand"),
4351                                    uimm_d, neon_uimm1_bare, asmop> {
4352     let Inst{12-10} = 0b001;
4353     let Inst{30} = lane{0};
4354   }
4355 }
4356
4357 // Post-index load single 1-element structure to one lane of 1 register.
4358 defm LD1LN_WB : LD_Lane_WB_BHSD<0b0, 0b0, "VOne", "ld1", uimm_exact1,
4359                                 uimm_exact2, uimm_exact4, uimm_exact8>;
4360
4361 // Post-index load single N-element structure to one lane of N consecutive
4362 // registers
4363 // (N = 2,3,4)
4364 defm LD2LN_WB : LD_Lane_WB_BHSD<0b1, 0b0, "VPair", "ld2", uimm_exact2,
4365                                 uimm_exact4, uimm_exact8, uimm_exact16>;
4366 defm LD3LN_WB : LD_Lane_WB_BHSD<0b0, 0b1, "VTriple", "ld3", uimm_exact3,
4367                                 uimm_exact6, uimm_exact12, uimm_exact24>;
4368 defm LD4LN_WB : LD_Lane_WB_BHSD<0b1, 0b1, "VQuad", "ld4", uimm_exact4,
4369                                 uimm_exact8, uimm_exact16, uimm_exact32>;
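// E.g. "ld1 {v0.h}[2], [x0], #2" (fixed offset) or
// "ld4 {v0.b, v1.b, v2.b, v3.b}[0], [x0], x1" (register post-index).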
4370
4371 let mayStore = 1, neverHasSideEffects = 1,
4372     hasExtraDefRegAllocReq = 1, Constraints = "$Rn = $wb",
4373     DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
4374   class STN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4375                       Operand ImmTy, Operand ImmOp, string asmop>
4376       : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
4377                                 (outs GPR64xsp:$wb),
4378                                 (ins GPR64xsp:$Rn, ImmTy:$amt,
4379                                     VList:$Rt, ImmOp:$lane),
4380                                 asmop # "\t$Rt[$lane], [$Rn], $amt",
4381                                 [],
4382                                 NoItinerary>,
4383         Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
4384     let Rm = 0b11111;
4385   }
4386
4387   class STN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4388                        Operand ImmTy, Operand ImmOp, string asmop>
4389       : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
4390                                 (outs GPR64xsp:$wb),
4391                                 (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VList:$Rt,
4392                                     ImmOp:$lane),
4393                                 asmop # "\t$Rt[$lane], [$Rn], $Rm",
4394                                 [],
4395                                 NoItinerary>,
4396         Sched<[WriteVecSt, ReadVecSt, ReadVecSt, ReadVecSt]>;
4397 }
4398
4399 multiclass ST_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
4400                            Operand uimm_b, Operand uimm_h,
4401                            Operand uimm_s, Operand uimm_d> {
4402   def _B_fixed : STN_WBFx_Lane<r, 0b00, op0,
4403                                !cast<RegisterOperand>(List # "B_operand"),
4404                                uimm_b, neon_uimm4_bare, asmop> {
4405     let Inst{12-10} = lane{2-0};
4406     let Inst{30} = lane{3};
4407   }
4408
4409   def _B_register : STN_WBReg_Lane<r, 0b00, op0,
4410                                    !cast<RegisterOperand>(List # "B_operand"),
4411                                    uimm_b, neon_uimm4_bare, asmop> {
4412     let Inst{12-10} = lane{2-0};
4413     let Inst{30} = lane{3};
4414   }
4415
4416   def _H_fixed : STN_WBFx_Lane<r, 0b01, op0,
4417                                !cast<RegisterOperand>(List # "H_operand"),
4418                                uimm_h, neon_uimm3_bare, asmop> {
4419     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4420     let Inst{30} = lane{2};
4421   }
4422
4423   def _H_register : STN_WBReg_Lane<r, 0b01, op0,
4424                                    !cast<RegisterOperand>(List # "H_operand"),
4425                                    uimm_h, neon_uimm3_bare, asmop> {
4426     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4427     let Inst{30} = lane{2};
4428   }
4429
4430   def _S_fixed : STN_WBFx_Lane<r, 0b10, op0,
4431                                !cast<RegisterOperand>(List # "S_operand"),
4432                                uimm_s, neon_uimm2_bare, asmop> {
4433     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4434     let Inst{30} = lane{1};
4435   }
4436
4437   def _S_register : STN_WBReg_Lane<r, 0b10, op0,
4438                                    !cast<RegisterOperand>(List # "S_operand"),
4439                                    uimm_s, neon_uimm2_bare, asmop> {
4440     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4441     let Inst{30} = lane{1};
4442   }
4443
4444   def _D_fixed : STN_WBFx_Lane<r, 0b10, op0,
4445                                !cast<RegisterOperand>(List # "D_operand"),
4446                                uimm_d, neon_uimm1_bare, asmop> {
4447     let Inst{12-10} = 0b001;
4448     let Inst{30} = lane{0};
4449   }
4450
4451   def _D_register : STN_WBReg_Lane<r, 0b10, op0,
4452                                    !cast<RegisterOperand>(List # "D_operand"),
4453                                    uimm_d, neon_uimm1_bare, asmop> {
4454     let Inst{12-10} = 0b001;
4455     let Inst{30} = lane{0};
4456   }
4457 }
4458
4459 // Post-index store single 1-element structure from one lane of 1 register.
4460 defm ST1LN_WB : ST_Lane_WB_BHSD<0b0, 0b0, "VOne", "st1", uimm_exact1,
4461                                 uimm_exact2, uimm_exact4, uimm_exact8>;
4462
4463 // Post-index store single N-element structure from one lane of N consecutive
4464 // registers (N = 2,3,4)
4465 defm ST2LN_WB : ST_Lane_WB_BHSD<0b1, 0b0, "VPair", "st2", uimm_exact2,
4466                                 uimm_exact4, uimm_exact8, uimm_exact16>;
4467 defm ST3LN_WB : ST_Lane_WB_BHSD<0b0, 0b1, "VTriple", "st3", uimm_exact3,
4468                                 uimm_exact6, uimm_exact12, uimm_exact24>;
4469 defm ST4LN_WB : ST_Lane_WB_BHSD<0b1, 0b1, "VQuad", "st4", uimm_exact4,
4470                                 uimm_exact8, uimm_exact16, uimm_exact32>;
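// E.g. "st2 {v0.s, v1.s}[3], [x0], #8" (two 32-bit elements stored, so the
// fixed offset is #8).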
4471
4472 // End of post-index load/store single N-element instructions
4473 // (class SIMD lsone-post)
4474
4475 // Neon Scalar instructions implementation
4476 // Scalar Three Same
4477
4478 class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
4479                              RegisterClass FPRC>
4480   : NeonI_Scalar3Same<u, size, opcode,
4481                       (outs FPRC:$Rd), (ins FPRC:$Rn, FPRC:$Rm),
4482                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4483                       [],
4484                       NoItinerary>,
4485     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
4486
4487 class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
4488   : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
4489
4490 multiclass NeonI_Scalar3Same_HS_sizes<bit u, bits<5> opcode, string asmop,
4491                                       bit Commutable = 0> {
4492   let isCommutable = Commutable in {
4493     def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
4494     def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
4495   }
4496 }
4497
4498 multiclass NeonI_Scalar3Same_SD_sizes<bit u, bit size_high, bits<5> opcode,
4499                                       string asmop, bit Commutable = 0> {
4500   let isCommutable = Commutable in {
4501     def sss : NeonI_Scalar3Same_size<u, {size_high, 0b0}, opcode, asmop, FPR32>;
4502     def ddd : NeonI_Scalar3Same_size<u, {size_high, 0b1}, opcode, asmop, FPR64>;
4503   }
4504 }
4505
4506 multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
4507                                         string asmop, bit Commutable = 0> {
4508   let isCommutable = Commutable in {
4509     def bbb : NeonI_Scalar3Same_size<u, 0b00, opcode, asmop, FPR8>;
4510     def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
4511     def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
4512     def ddd : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
4513   }
4514 }
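// For illustration, the size-suffixed classes above should cover scalar forms
// such as "add d0, d1, d2" (D size only) or "sqadd b0, b1, b2" (any of
// B/H/S/D); the bbb/hhh/sss/ddd suffix names the operand register class.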
4515
4516 multiclass Neon_Scalar3Same_D_size_patterns<SDPatternOperator opnode,
4517                                             Instruction INSTD> {
4518   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
4519             (INSTD FPR64:$Rn, FPR64:$Rm)>;
4520 }
4521
4522 multiclass Neon_Scalar3Same_BHSD_size_patterns<SDPatternOperator opnode,
4523                                                Instruction INSTB,
4524                                                Instruction INSTH,
4525                                                Instruction INSTS,
4526                                                Instruction INSTD>
4527   : Neon_Scalar3Same_D_size_patterns<opnode, INSTD> {
4528   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
4529             (INSTB FPR8:$Rn, FPR8:$Rm)>;
4530   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4531             (INSTH FPR16:$Rn, FPR16:$Rm)>;
4532   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4533             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4534 }
4535
4536 multiclass Neon_Scalar3Same_HS_size_patterns<SDPatternOperator opnode,
4537                                              Instruction INSTH,
4538                                              Instruction INSTS> {
4539   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4540             (INSTH FPR16:$Rn, FPR16:$Rm)>;
4541   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4542             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4543 }
4544
4545 multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
4546                                              ValueType SResTy, ValueType STy,
4547                                              Instruction INSTS, ValueType DResTy,
4548                                              ValueType DTy, Instruction INSTD> {
4549   def : Pat<(SResTy (opnode (STy FPR32:$Rn), (STy FPR32:$Rm))),
4550             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4551   def : Pat<(DResTy (opnode (DTy FPR64:$Rn), (DTy FPR64:$Rm))),
4552             (INSTD FPR64:$Rn, FPR64:$Rm)>;
4553 }
4554
4555 class Neon_Scalar3Same_cmp_V1_D_size_patterns<CondCode CC,
4556                                               Instruction INSTD>
4557   : Pat<(v1i64 (Neon_cmp (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm), CC)),
4558         (INSTD FPR64:$Rn, FPR64:$Rm)>;
4559
4560 // Scalar Three Different
4561
4562 class NeonI_Scalar3Diff_size<bit u, bits<2> size, bits<4> opcode, string asmop,
4563                              RegisterClass FPRCD, RegisterClass FPRCS>
4564   : NeonI_Scalar3Diff<u, size, opcode,
4565                       (outs FPRCD:$Rd), (ins FPRCS:$Rn, FPRCS:$Rm),
4566                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4567                       [],
4568                       NoItinerary>,
4569     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
4570
4571 multiclass NeonI_Scalar3Diff_HS_size<bit u, bits<4> opcode, string asmop> {
4572   def shh : NeonI_Scalar3Diff_size<u, 0b01, opcode, asmop, FPR32, FPR16>;
4573   def dss : NeonI_Scalar3Diff_size<u, 0b10, opcode, asmop, FPR64, FPR32>;
4574 }
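// E.g. "sqdmull s0, h1, h2" takes two 16-bit scalar operands and produces a
// widened 32-bit result, matching the FPR32/FPR16 register classes above.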
4575
4576 multiclass NeonI_Scalar3Diff_ml_HS_size<bit u, bits<4> opcode, string asmop> {
4577   let Constraints = "$Src = $Rd" in {
4578     def shh : NeonI_Scalar3Diff<u, 0b01, opcode,
4579                        (outs FPR32:$Rd), (ins FPR32:$Src, FPR16:$Rn, FPR16:$Rm),
4580                        !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4581                        [],
4582                        NoItinerary>,
4583               Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]>;
4584     def dss : NeonI_Scalar3Diff<u, 0b10, opcode,
4585                        (outs FPR64:$Rd), (ins FPR64:$Src, FPR32:$Rn, FPR32:$Rm),
4586                        !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4587                        [],
4588                        NoItinerary>,
4589               Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]>;
4590   }
4591 }
4592
4593 multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
4594                                              Instruction INSTH,
4595                                              Instruction INSTS> {
4596   def : Pat<(v1i32 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4597             (INSTH FPR16:$Rn, FPR16:$Rm)>;
4598   def : Pat<(v1i64 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4599             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4600 }
4601
4602 multiclass Neon_Scalar3Diff_ml_HS_size_patterns<SDPatternOperator opnode,
4603                                              Instruction INSTH,
4604                                              Instruction INSTS> {
4605   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4606             (INSTH FPR32:$Src, FPR16:$Rn, FPR16:$Rm)>;
4607   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4608             (INSTS FPR64:$Src, FPR32:$Rn, FPR32:$Rm)>;
4609 }
4610
4611 // Scalar Two Registers Miscellaneous
4612
4613 class NeonI_Scalar2SameMisc_size<bit u, bits<2> size, bits<5> opcode, string asmop,
4614                              RegisterClass FPRCD, RegisterClass FPRCS>
4615   : NeonI_Scalar2SameMisc<u, size, opcode,
4616                           (outs FPRCD:$Rd), (ins FPRCS:$Rn),
4617                           !strconcat(asmop, "\t$Rd, $Rn"),
4618                           [],
4619                           NoItinerary>,
4620     Sched<[WriteFPALU, ReadFPALU]>;
4621
4622 multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
4623                                          string asmop> {
4624   def ss : NeonI_Scalar2SameMisc_size<u, {size_high, 0b0}, opcode, asmop, FPR32,
4625                                       FPR32>;
4626   def dd : NeonI_Scalar2SameMisc_size<u, {size_high, 0b1}, opcode, asmop, FPR64,
4627                                       FPR64>;
4628 }
4629
4630 multiclass NeonI_Scalar2SameMisc_D_size<bit u, bits<5> opcode, string asmop> {
4631   def dd : NeonI_Scalar2SameMisc_size<u, 0b11, opcode, asmop, FPR64, FPR64>;
4632 }
4633
4634 multiclass NeonI_Scalar2SameMisc_BHSD_size<bit u, bits<5> opcode, string asmop>
4635   : NeonI_Scalar2SameMisc_D_size<u, opcode, asmop> {
4636   def bb : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR8>;
4637   def hh : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR16>;
4638   def ss : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR32>;
4639 }
4640
4641 class NeonI_Scalar2SameMisc_fcvtxn_D_size<bit u, bits<5> opcode, string asmop>
4642   : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR32, FPR64>;
4643
4644 multiclass NeonI_Scalar2SameMisc_narrow_HSD_size<bit u, bits<5> opcode,
4645                                                  string asmop> {
4646   def bh : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR16>;
4647   def hs : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR32>;
4648   def sd : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR64>;
4649 }
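// E.g. "sqxtn s0, d1" narrows a 64-bit scalar to a saturated 32-bit result
// (the sd variant above).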
4650
4651 class NeonI_Scalar2SameMisc_accum_size<bit u, bits<2> size, bits<5> opcode,
4652                                        string asmop, RegisterClass FPRC>
4653   : NeonI_Scalar2SameMisc<u, size, opcode,
4654                           (outs FPRC:$Rd), (ins FPRC:$Src, FPRC:$Rn),
4655                           !strconcat(asmop, "\t$Rd, $Rn"),
4656                           [],
4657                           NoItinerary>,
4658     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
4659
4660 multiclass NeonI_Scalar2SameMisc_accum_BHSD_size<bit u, bits<5> opcode,
4661                                                  string asmop> {
4662
4663   let Constraints = "$Src = $Rd" in {
4664     def bb : NeonI_Scalar2SameMisc_accum_size<u, 0b00, opcode, asmop, FPR8>;
4665     def hh : NeonI_Scalar2SameMisc_accum_size<u, 0b01, opcode, asmop, FPR16>;
4666     def ss : NeonI_Scalar2SameMisc_accum_size<u, 0b10, opcode, asmop, FPR32>;
4667     def dd : NeonI_Scalar2SameMisc_accum_size<u, 0b11, opcode, asmop, FPR64>;
4668   }
4669 }
4670
4671 class Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<SDPatternOperator opnode,
4672                                                   Instruction INSTD>
4673   : Pat<(f32 (opnode (f64 FPR64:$Rn))),
4674         (INSTD FPR64:$Rn)>;
4675
4676 multiclass Neon_Scalar2SameMisc_fcvt_SD_size_patterns<SDPatternOperator opnode,
4677                                                       Instruction INSTS,
4678                                                       Instruction INSTD> {
4679   def : Pat<(v1i32 (opnode (f32 FPR32:$Rn))),
4680             (INSTS FPR32:$Rn)>;
4681   def : Pat<(v1i64 (opnode (f64 FPR64:$Rn))),
4682             (INSTD FPR64:$Rn)>;
4683 }
4684
4685 class Neon_Scalar2SameMisc_vcvt_D_size_patterns<SDPatternOperator opnode,
4686                                                 Instruction INSTD>
4687   : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))),
4688             (INSTD FPR64:$Rn)>;
4689
4690 multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator opnode,
4691                                                      Instruction INSTS,
4692                                                      Instruction INSTD> {
4693   def : Pat<(f32 (opnode (v1i32 FPR32:$Rn))),
4694             (INSTS FPR32:$Rn)>;
4695   def : Pat<(f64 (opnode (v1i64 FPR64:$Rn))),
4696             (INSTD FPR64:$Rn)>;
4697 }
4698
4699 multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
4700                                                  Instruction INSTS,
4701                                                  Instruction INSTD> {
4702   def : Pat<(f32 (opnode (f32 FPR32:$Rn))),
4703             (INSTS FPR32:$Rn)>;
4704   def : Pat<(f64 (opnode (f64 FPR64:$Rn))),
4705             (INSTD FPR64:$Rn)>;
4706 }
4707
4708 class Neon_Scalar2SameMisc_V1_D_size_patterns<SDPatternOperator opnode,
4709                                               Instruction INSTD>
4710   : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
4711         (INSTD FPR64:$Rn)>;
4712
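// Compare-against-zero forms: the second source operand is a literal #0
// (integer) or #0.0 (floating-point), and the destination is set to an
// all-ones mask when the comparison holds, all-zeros otherwise.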
4713 class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
4714   : NeonI_Scalar2SameMisc<u, 0b11, opcode,
4715                           (outs FPR64:$Rd), (ins FPR64:$Rn, neon_uimm0:$Imm),
4716                           !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4717                           [],
4718                           NoItinerary>,
4719     Sched<[WriteFPALU, ReadFPALU]>;
4720
4721 multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
4722                                               string asmop> {
4723   def ssi : NeonI_Scalar2SameMisc<u, 0b10, opcode,
4724                            (outs FPR32:$Rd), (ins FPR32:$Rn, fpzz32:$FPImm),
4725                            !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
4726                            [],
4727                            NoItinerary>,
4728             Sched<[WriteFPALU, ReadFPALU]>;
4729   def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
4730                            (outs FPR64:$Rd), (ins FPR64:$Rn, fpzz32:$FPImm),
4731                            !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
4732                            [],
4733                            NoItinerary>,
4734             Sched<[WriteFPALU, ReadFPALU]>;
4735 }
4736
4737 class Neon_Scalar2SameMisc_cmpz_D_size_patterns<SDPatternOperator opnode,
4738                                                 Instruction INSTD>
4739   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
4740                        (v1i64 (bitconvert (v8i8 Neon_AllZero))))),
4741         (INSTD FPR64:$Rn, 0)>;
4742
4743 class Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<CondCode CC,
4744                                                    Instruction INSTD>
4745   : Pat<(v1i64 (Neon_cmpz (v1i64 FPR64:$Rn),
4746                           (i32 neon_uimm0:$Imm), CC)),
4747         (INSTD FPR64:$Rn, neon_uimm0:$Imm)>;
4748
4749 multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
4750                                                       CondCode CC,
4751                                                       Instruction INSTS,
4752                                                       Instruction INSTD> {
4753   def : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (f32 fpzz32:$FPImm))),
4754             (INSTS FPR32:$Rn, fpzz32:$FPImm)>;
4755   def : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (f32 fpzz32:$FPImm))),
4756             (INSTD FPR64:$Rn, fpzz32:$FPImm)>;
4757   def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpzz32:$FPImm), CC)),
4758             (INSTD FPR64:$Rn, fpzz32:$FPImm)>;
4759 }
4760
4761 multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
4762                                                 Instruction INSTD> {
4763   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn))),
4764             (INSTD FPR64:$Rn)>;
4765 }
4766
4767 multiclass Neon_Scalar2SameMisc_BHSD_size_patterns<SDPatternOperator opnode,
4768                                                    Instruction INSTB,
4769                                                    Instruction INSTH,
4770                                                    Instruction INSTS,
4771                                                    Instruction INSTD>
4772   : Neon_Scalar2SameMisc_D_size_patterns<opnode, INSTD> {
4773   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn))),
4774             (INSTB FPR8:$Rn)>;
4775   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn))),
4776             (INSTH FPR16:$Rn)>;
4777   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn))),
4778             (INSTS FPR32:$Rn)>;
4779 }
4780
4781 multiclass Neon_Scalar2SameMisc_narrow_HSD_size_patterns<
4782                                                        SDPatternOperator opnode,
4783                                                        Instruction INSTH,
4784                                                        Instruction INSTS,
4785                                                        Instruction INSTD> {
4786   def : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn))),
4787             (INSTH FPR16:$Rn)>;
4788   def : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn))),
4789             (INSTS FPR32:$Rn)>;
4790   def : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn))),
4791             (INSTD FPR64:$Rn)>;
4792
4793 }
4794
4795 multiclass Neon_Scalar2SameMisc_accum_BHSD_size_patterns<
4796                                                        SDPatternOperator opnode,
4797                                                        Instruction INSTB,
4798                                                        Instruction INSTH,
4799                                                        Instruction INSTS,
4800                                                        Instruction INSTD> {
4801   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Src), (v1i8 FPR8:$Rn))),
4802             (INSTB FPR8:$Src, FPR8:$Rn)>;
4803   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Src), (v1i16 FPR16:$Rn))),
4804             (INSTH FPR16:$Src, FPR16:$Rn)>;
4805   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i32 FPR32:$Rn))),
4806             (INSTS FPR32:$Src, FPR32:$Rn)>;
4807   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn))),
4808             (INSTD FPR64:$Src, FPR64:$Rn)>;
4809 }
4810
4811 // Scalar Shift By Immediate
4812
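// Per the A64 encoding, the shift amount is carried in immh:immb
// (Inst{22-16}); the fixed leading immh bits set in each def below select
// the element size (see the "immh:immb = ..." comments). Architecturally a
// right shift by N over esize-bit elements is encoded as 2*esize - N, and a
// left shift by N as esize + N.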
4813 class NeonI_ScalarShiftImm_size<bit u, bits<5> opcode, string asmop,
4814                                 RegisterClass FPRC, Operand ImmTy>
4815   : NeonI_ScalarShiftImm<u, opcode,
4816                          (outs FPRC:$Rd), (ins FPRC:$Rn, ImmTy:$Imm),
4817                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4818                          [], NoItinerary>,
4819     Sched<[WriteFPALU, ReadFPALU]>;
4820
4821 multiclass NeonI_ScalarShiftRightImm_D_size<bit u, bits<5> opcode,
4822                                             string asmop> {
4823   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
4824     bits<6> Imm;
4825     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4826     let Inst{21-16} = Imm;
4827   }
4828 }
4829
4830 multiclass NeonI_ScalarShiftRightImm_BHSD_size<bit u, bits<5> opcode,
4831                                                string asmop>
4832   : NeonI_ScalarShiftRightImm_D_size<u, opcode, asmop> {
4833   def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shr_imm8> {
4834     bits<3> Imm;
4835     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4836     let Inst{18-16} = Imm;
4837   }
4838   def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shr_imm16> {
4839     bits<4> Imm;
4840     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4841     let Inst{19-16} = Imm;
4842   }
4843   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
4844     bits<5> Imm;
4845     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4846     let Inst{20-16} = Imm;
4847   }
4848 }
4849
4850 multiclass NeonI_ScalarShiftLeftImm_D_size<bit u, bits<5> opcode,
4851                                             string asmop> {
4852   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shl_imm64> {
4853     bits<6> Imm;
4854     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4855     let Inst{21-16} = Imm;
4856   }
4857 }
4858
4859 multiclass NeonI_ScalarShiftLeftImm_BHSD_size<bit u, bits<5> opcode,
4860                                               string asmop>
4861   : NeonI_ScalarShiftLeftImm_D_size<u, opcode, asmop> {
4862   def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shl_imm8> {
4863     bits<3> Imm;
4864     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4865     let Inst{18-16} = Imm;
4866   }
4867   def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shl_imm16> {
4868     bits<4> Imm;
4869     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4870     let Inst{19-16} = Imm;
4871   }
4872   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shl_imm32> {
4873     bits<5> Imm;
4874     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4875     let Inst{20-16} = Imm;
4876   }
4877 }
4878
4879 class NeonI_ScalarShiftRightImm_accum_D_size<bit u, bits<5> opcode, string asmop>
4880   : NeonI_ScalarShiftImm<u, opcode,
4881                          (outs FPR64:$Rd),
4882                          (ins FPR64:$Src, FPR64:$Rn, shr_imm64:$Imm),
4883                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4884                          [], NoItinerary>,
4885     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
4886     bits<6> Imm;
4887     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4888     let Inst{21-16} = Imm;
4889     let Constraints = "$Src = $Rd";
4890 }
4891
4892 class NeonI_ScalarShiftLeftImm_accum_D_size<bit u, bits<5> opcode, string asmop>
4893   : NeonI_ScalarShiftImm<u, opcode,
4894                          (outs FPR64:$Rd),
4895                          (ins FPR64:$Src, FPR64:$Rn, shl_imm64:$Imm),
4896                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4897                          [], NoItinerary>,
4898     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
4899     bits<6> Imm;
4900     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4901     let Inst{21-16} = Imm;
4902     let Constraints = "$Src = $Rd";
4903 }
4904
4905 class NeonI_ScalarShiftImm_narrow_size<bit u, bits<5> opcode, string asmop,
4906                                        RegisterClass FPRCD, RegisterClass FPRCS,
4907                                        Operand ImmTy>
4908   : NeonI_ScalarShiftImm<u, opcode,
4909                          (outs FPRCD:$Rd), (ins FPRCS:$Rn, ImmTy:$Imm),
4910                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4911                          [], NoItinerary>,
4912     Sched<[WriteFPALU, ReadFPALU]>;
4913
4914 multiclass NeonI_ScalarShiftImm_narrow_HSD_size<bit u, bits<5> opcode,
4915                                                 string asmop> {
4916   def bhi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR8, FPR16,
4917                                              shr_imm8> {
4918     bits<3> Imm;
4919     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4920     let Inst{18-16} = Imm;
4921   }
4922   def hsi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR16, FPR32,
4923                                              shr_imm16> {
4924     bits<4> Imm;
4925     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4926     let Inst{19-16} = Imm;
4927   }
4928   def sdi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR32, FPR64,
4929                                              shr_imm32> {
4930     bits<5> Imm;
4931     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4932     let Inst{20-16} = Imm;
4933   }
4934 }
4935
4936 multiclass NeonI_ScalarShiftImm_cvt_SD_size<bit u, bits<5> opcode, string asmop> {
4937   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
4938     bits<5> Imm;
4939     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4940     let Inst{20-16} = Imm;
4941   }
4942   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
4943     bits<6> Imm;
4944     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4945     let Inst{21-16} = Imm;
4946   }
4947 }
4948
4949 multiclass Neon_ScalarShiftRImm_D_size_patterns<SDPatternOperator opnode,
4950                                                Instruction INSTD> {
4951   def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
4952                 (INSTD FPR64:$Rn, imm:$Imm)>;
4953 }
4954
4955 multiclass Neon_ScalarShiftLImm_D_size_patterns<SDPatternOperator opnode,
4956                                                Instruction INSTD> {
4957   def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shl_imm64:$Imm))),
4958                 (INSTD FPR64:$Rn, imm:$Imm)>;
4959 }
4960
4961 class Neon_ScalarShiftLImm_V1_D_size_patterns<SDPatternOperator opnode,
4962                                              Instruction INSTD>
4963   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
4964             (v1i64 (Neon_vdup (i32 shl_imm64:$Imm))))),
4965         (INSTD FPR64:$Rn, imm:$Imm)>;
4966
4967 class Neon_ScalarShiftRImm_V1_D_size_patterns<SDPatternOperator opnode,
4968                                              Instruction INSTD>
4969   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
4970             (v1i64 (Neon_vdup (i32 shr_imm64:$Imm))))),
4971         (INSTD FPR64:$Rn, imm:$Imm)>;
4972
4973 multiclass Neon_ScalarShiftLImm_BHSD_size_patterns<SDPatternOperator opnode,
4974                                                    Instruction INSTB,
4975                                                    Instruction INSTH,
4976                                                    Instruction INSTS,
4977                                                    Instruction INSTD>
4978   : Neon_ScalarShiftLImm_D_size_patterns<opnode, INSTD> {
4979   def bbi : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (i32 shl_imm8:$Imm))),
4980                 (INSTB FPR8:$Rn, imm:$Imm)>;
4981   def hhi : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (i32 shl_imm16:$Imm))),
4982                 (INSTH FPR16:$Rn, imm:$Imm)>;
4983   def ssi : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (i32 shl_imm32:$Imm))),
4984                 (INSTS FPR32:$Rn, imm:$Imm)>;
4985 }
4986
4987 class Neon_ScalarShiftLImm_accum_D_size_patterns<SDPatternOperator opnode,
4988                                                 Instruction INSTD>
4989   : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
4990             (i32 shl_imm64:$Imm))),
4991         (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
4992
4993 class Neon_ScalarShiftRImm_accum_D_size_patterns<SDPatternOperator opnode,
4994                                                 Instruction INSTD>
4995   : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
4996             (i32 shr_imm64:$Imm))),
4997         (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
4998
4999 multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
5000                                                        SDPatternOperator opnode,
5001                                                        Instruction INSTH,
5002                                                        Instruction INSTS,
5003                                                        Instruction INSTD> {
5004   def bhi : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn), (i32 shr_imm16:$Imm))),
5005                 (INSTH FPR16:$Rn, imm:$Imm)>;
5006   def hsi : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
5007                 (INSTS FPR32:$Rn, imm:$Imm)>;
5008   def sdi : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5009                 (INSTD FPR64:$Rn, imm:$Imm)>;
5010 }
5011
5012 multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator opnode,
5013                                                       Instruction INSTS,
5014                                                       Instruction INSTD> {
5015   def ssi : Pat<(f32 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
5016                 (INSTS FPR32:$Rn, imm:$Imm)>;
5017   def ddi : Pat<(f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5018                 (INSTD FPR64:$Rn, imm:$Imm)>;
5019 }
5020
5021 multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator opnode,
5022                                                       Instruction INSTS,
5023                                                       Instruction INSTD> {
5024   def ssi : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
5025                 (INSTS FPR32:$Rn, imm:$Imm)>;
5026   def ddi : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5027                 (INSTD FPR64:$Rn, imm:$Imm)>;
5028 }
5029
5030 // Scalar Signed Shift Right (Immediate)
5031 defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
5032 defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
5033 // Pattern to match llvm.arm.* intrinsic.
5034 def : Neon_ScalarShiftRImm_V1_D_size_patterns<sra, SSHRddi>;
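// Illustrative assembly form: "sshr d0, d1, #3" arithmetically shifts the
// 64-bit value in d1 right by three bits.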
5035
5036 // Scalar Unsigned Shift Right (Immediate)
5037 defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
5038 defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrdu_n, USHRddi>;
5039 // Pattern to match llvm.arm.* intrinsic.
5040 def : Neon_ScalarShiftRImm_V1_D_size_patterns<srl, USHRddi>;
5041
5042 // Scalar Signed Rounding Shift Right (Immediate)
5043 defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
5044 defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vsrshr, SRSHRddi>;
5045
5046 // Scalar Unsigned Rounding Shift Right (Immediate)
5047 defm URSHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00100, "urshr">;
5048 defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vurshr, URSHRddi>;
5049
5050 // Scalar Signed Shift Right and Accumulate (Immediate)
5051 def SSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00010, "ssra">;
5052 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5053           <int_aarch64_neon_vsrads_n, SSRA>;
5054
5055 // Scalar Unsigned Shift Right and Accumulate (Immediate)
5056 def USRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00010, "usra">;
5057 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5058           <int_aarch64_neon_vsradu_n, USRA>;
5059
5060 // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
5061 def SRSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00110, "srsra">;
5062 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5063           <int_aarch64_neon_vrsrads_n, SRSRA>;
5064
5065 // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
5066 def URSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00110, "ursra">;
5067 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5068           <int_aarch64_neon_vrsradu_n, URSRA>;
5069
5070 // Scalar Shift Left (Immediate)
5071 defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
5072 defm : Neon_ScalarShiftLImm_D_size_patterns<int_aarch64_neon_vshld_n, SHLddi>;
5073 // Pattern to match llvm.arm.* intrinsic.
5074 def : Neon_ScalarShiftLImm_V1_D_size_patterns<shl, SHLddi>;
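// Illustrative assembly form: "shl d0, d1, #8".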
5075
5076 // Signed Saturating Shift Left (Immediate)
5077 defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
5078 defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshls_n,
5079                                                SQSHLbbi, SQSHLhhi,
5080                                                SQSHLssi, SQSHLddi>;
5081 // Pattern to match llvm.arm.* intrinsic.
5082 defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_sqrshlImm, SQSHLddi>;
5083
5084 // Unsigned Saturating Shift Left (Immediate)
5085 defm UQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01110, "uqshl">;
5086 defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshlu_n,
5087                                                UQSHLbbi, UQSHLhhi,
5088                                                UQSHLssi, UQSHLddi>;
5089 // Pattern to match llvm.arm.* intrinsic.
5090 defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_uqrshlImm, UQSHLddi>;
5091
5092 // Signed Saturating Shift Left Unsigned (Immediate)
5093 defm SQSHLU : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01100, "sqshlu">;
5094 defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vsqshlu,
5095                                                SQSHLUbbi, SQSHLUhhi,
5096                                                SQSHLUssi, SQSHLUddi>;
5097
5098 // Shift Right And Insert (Immediate)
5099 def SRI : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b01000, "sri">;
5100 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5101           <int_aarch64_neon_vsri, SRI>;
5102
5103 // Shift Left And Insert (Immediate)
5104 def SLI : NeonI_ScalarShiftLeftImm_accum_D_size<0b1, 0b01010, "sli">;
5105 def : Neon_ScalarShiftLImm_accum_D_size_patterns
5106           <int_aarch64_neon_vsli, SLI>;
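// SRI and SLI insert the shifted bits into the destination rather than
// overwriting it (the bits outside the inserted range are preserved), which
// is why they reuse the accumulating, tied "$Src = $Rd" shift classes above.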
5107
5108 // Signed Saturating Shift Right Narrow (Immediate)
5109 defm SQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10010, "sqshrn">;
5110 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrn,
5111                                                     SQSHRNbhi, SQSHRNhsi,
5112                                                     SQSHRNsdi>;
5113
5114 // Unsigned Saturating Shift Right Narrow (Immediate)
5115 defm UQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10010, "uqshrn">;
5116 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqshrn,
5117                                                     UQSHRNbhi, UQSHRNhsi,
5118                                                     UQSHRNsdi>;
5119
5120 // Signed Saturating Rounded Shift Right Narrow (Immediate)
5121 defm SQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10011, "sqrshrn">;
5122 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrn,
5123                                                     SQRSHRNbhi, SQRSHRNhsi,
5124                                                     SQRSHRNsdi>;
5125
5126 // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
5127 defm UQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10011, "uqrshrn">;
5128 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqrshrn,
5129                                                     UQRSHRNbhi, UQRSHRNhsi,
5130                                                     UQRSHRNsdi>;
5131
5132 // Signed Saturating Shift Right Unsigned Narrow (Immediate)
5133 defm SQSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10000, "sqshrun">;
5134 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrun,
5135                                                     SQSHRUNbhi, SQSHRUNhsi,
5136                                                     SQSHRUNsdi>;
5137
5138 // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
5139 defm SQRSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10001, "sqrshrun">;
5140 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
5141                                                     SQRSHRUNbhi, SQRSHRUNhsi,
5142                                                     SQRSHRUNsdi>;
5143
5144 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
5145 defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
5146 defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxs2fp_n,
5147                                                   SCVTF_Nssi, SCVTF_Nddi>;
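// Illustrative assembly form: "scvtf d0, d1, #16" converts a signed
// fixed-point value with 16 fractional bits to double precision.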
5148
5149 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
5150 defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
5151 defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxu2fp_n,
5152                                                   UCVTF_Nssi, UCVTF_Nddi>;
5153
5154 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
5155 defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
5156 defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxs_n,
5157                                                   FCVTZS_Nssi, FCVTZS_Nddi>;
5158
5159 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
5160 defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
5161 defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxu_n,
5162                                                   FCVTZU_Nssi, FCVTZU_Nddi>;
5163
5164 // Patterns For Convert Instructions Between v1f64 and v1i64
5165 class Neon_ScalarShiftImm_cvtf_v1f64_pattern<SDPatternOperator opnode,
5166                                              Instruction INST>
5167     : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5168           (INST FPR64:$Rn, imm:$Imm)>;
5169
5170 class Neon_ScalarShiftImm_fcvt_v1f64_pattern<SDPatternOperator opnode,
5171                                              Instruction INST>
5172     : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5173           (INST FPR64:$Rn, imm:$Imm)>;
5174
5175 def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxs2fp,
5176                                              SCVTF_Nddi>;
5177
5178 def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxu2fp,
5179                                              UCVTF_Nddi>;
5180
5181 def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxs,
5182                                              FCVTZS_Nddi>;
5183
5184 def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxu,
5185                                              FCVTZU_Nddi>;
5186
5187 // Scalar Integer Add
5188 let isCommutable = 1 in {
5189 def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
5190 }
5191
5192 // Scalar Integer Sub
5193 def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
5194
5195 // Pattern for Scalar Integer Add and Sub with D register only
5196 defm : Neon_Scalar3Same_D_size_patterns<add, ADDddd>;
5197 defm : Neon_Scalar3Same_D_size_patterns<sub, SUBddd>;
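// e.g. a v1i64 "add" node is selected to "add d0, d1, d2" on the FP/SIMD
// register file.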
5198
5199 // Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
5200 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
5201 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
5202 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
5203 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
5204
5205 // Scalar Integer Saturating Add (Signed, Unsigned)
5206 defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
5207 defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
5208
5209 // Scalar Integer Saturating Sub (Signed, Unsigned)
5210 defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
5211 defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
5212
5213
5214 // Patterns to match llvm.arm.* intrinsic for
5215 // Scalar Integer Saturating Add, Sub (Signed, Unsigned)
5216 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqadds, SQADDbbb,
5217                                            SQADDhhh, SQADDsss, SQADDddd>;
5218 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqaddu, UQADDbbb,
5219                                            UQADDhhh, UQADDsss, UQADDddd>;
5220 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubs, SQSUBbbb,
5221                                            SQSUBhhh, SQSUBsss, SQSUBddd>;
5222 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubu, UQSUBbbb,
5223                                            UQSUBhhh, UQSUBsss, UQSUBddd>;
5224
5225 // Scalar Integer Saturating Doubling Multiply Half High
5226 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in
5227 defm SQDMULH : NeonI_Scalar3Same_HS_sizes<0b0, 0b10110, "sqdmulh", 1>;
5228
5229 // Scalar Integer Saturating Rounding Doubling Multiply Half High
5230 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
5231 defm SQRDMULH : NeonI_Scalar3Same_HS_sizes<0b1, 0b10110, "sqrdmulh", 1>;
5232 }
5233
5234 // Patterns to match llvm.arm.* intrinsic for
5235 // Scalar Integer Saturating Doubling Multiply Half High and
5236 // Scalar Integer Saturating Rounding Doubling Multiply Half High
5237 defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqdmulh, SQDMULHhhh,
5238                                                                SQDMULHsss>;
5239 defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqrdmulh, SQRDMULHhhh,
5240                                                                 SQRDMULHsss>;
5241
5242 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in {
5243 // Scalar Floating-point Multiply Extended
5244 defm FMULX : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11011, "fmulx", 1>;
5245 }
5246
5247 // Scalar Floating-point Reciprocal Step
5248 defm FRECPS : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11111, "frecps", 0>;
5249 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vrecps, f32, f32,
5250                                          FRECPSsss, f64, f64, FRECPSddd>;
5251 def : Pat<(v1f64 (int_arm_neon_vrecps (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5252           (FRECPSddd FPR64:$Rn, FPR64:$Rm)>;
5253
5254 // Scalar Floating-point Reciprocal Square Root Step
5255 defm FRSQRTS : NeonI_Scalar3Same_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", 0>;
5256 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vrsqrts, f32, f32,
5257                                          FRSQRTSsss, f64, f64, FRSQRTSddd>;
5258 def : Pat<(v1f64 (int_arm_neon_vrsqrts (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5259           (FRSQRTSddd FPR64:$Rn, FPR64:$Rm)>;
5260 def : Pat<(v1f64 (fsqrt (v1f64 FPR64:$Rn))), (FSQRTdd FPR64:$Rn)>;
5261
5262 // Patterns to match llvm.aarch64.* intrinsic for
5263 // Scalar Floating-point Multiply Extended
5264 multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
5265                                                   Instruction INSTS,
5266                                                   Instruction INSTD> {
5267   def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
5268             (INSTS FPR32:$Rn, FPR32:$Rm)>;
5269   def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
5270             (INSTD FPR64:$Rn, FPR64:$Rm)>;
5271 }
5272
5273 defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
5274                                               FMULXsss, FMULXddd>;
5275 def : Pat<(v1f64 (int_aarch64_neon_vmulx (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5276           (FMULXddd FPR64:$Rn, FPR64:$Rm)>;
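// Illustrative assembly forms: "fmulx s0, s1, s2" and "fmulx d0, d1, d2".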
5277
5278 // Scalar Integer Shift Left (Signed, Unsigned)
5279 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
5280 def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
5281
5282 // Patterns to match llvm.arm.* intrinsic for
5283 // Scalar Integer Shift Left (Signed, Unsigned)
5284 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
5285 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
5286
5287 // Patterns to match llvm.aarch64.* intrinsic for
5288 // Scalar Integer Shift Left (Signed, Unsigned)
5289 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
5290 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
5291
5292 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
5293 defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
5294 defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
5295
5296 // Patterns to match llvm.aarch64.* intrinsic for
5297 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
5298 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb,
5299                                            SQSHLhhh, SQSHLsss, SQSHLddd>;
5300 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb,
5301                                            UQSHLhhh, UQSHLsss, UQSHLddd>;
5302
5303 // Patterns to match llvm.arm.* intrinsic for
5304 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
5305 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
5306 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
5307
5308 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
5309 def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
5310 def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
5311
5312 // Patterns to match llvm.aarch64.* intrinsic for
5313 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
5314 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
5315 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
5316
5317 // Patterns to match llvm.arm.* intrinsic for
5318 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
5319 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
5320 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
5321
5322 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
5323 defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
5324 defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
5325
5326 // Patterns to match llvm.aarch64.* intrinsic for
5327 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
5328 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb,
5329                                            SQRSHLhhh, SQRSHLsss, SQRSHLddd>;
5330 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb,
5331                                            UQRSHLhhh, UQRSHLsss, UQRSHLddd>;
5332
5333 // Patterns to match llvm.arm.* intrinsic for
5334 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
5335 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
5336 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
5337
5338 let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC] in {
5339 // Signed Saturating Doubling Multiply-Add Long
5340 defm SQDMLAL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1001, "sqdmlal">;
5341 }
5342 defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlal,
5343                                             SQDMLALshh, SQDMLALdss>;
5344
5345 // Signed Saturating Doubling Multiply-Subtract Long
5346 let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC] in {
5347 defm SQDMLSL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1011, "sqdmlsl">;
5348 }
5349 defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlsl,
5350                                             SQDMLSLshh, SQDMLSLdss>;
5351
5352 // Signed Saturating Doubling Multiply Long
5353 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in {
5354 defm SQDMULL : NeonI_Scalar3Diff_HS_size<0b0, 0b1101, "sqdmull">;
5355 }
5356 defm : Neon_Scalar3Diff_HS_size_patterns<int_arm_neon_vqdmull,
5357                                          SQDMULLshh, SQDMULLdss>;
5358
5359 // Scalar Signed Integer Convert To Floating-point
5360 defm SCVTF  : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11101, "scvtf">;
5361 defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtint2fps,
5362                                                  SCVTFss, SCVTFdd>;
5363
5364 // Scalar Unsigned Integer Convert To Floating-point
5365 defm UCVTF  : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11101, "ucvtf">;
5366 defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtint2fpu,
5367                                                  UCVTFss, UCVTFdd>;
5368
5369 // Scalar Floating-point Converts
5370 def FCVTXN : NeonI_Scalar2SameMisc_fcvtxn_D_size<0b1, 0b10110, "fcvtxn">;
5371 def : Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<int_aarch64_neon_fcvtxn,
5372                                                   FCVTXN>;
5373
5374 defm FCVTNS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11010, "fcvtns">;
5375 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtns,
5376                                                   FCVTNSss, FCVTNSdd>;
5377 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtns, FCVTNSdd>;
5378
5379 defm FCVTNU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11010, "fcvtnu">;
5380 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtnu,
5381                                                   FCVTNUss, FCVTNUdd>;
5382 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtnu, FCVTNUdd>;
5383
5384 defm FCVTMS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11011, "fcvtms">;
5385 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtms,
5386                                                   FCVTMSss, FCVTMSdd>;
5387 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtms, FCVTMSdd>;
5388
5389 defm FCVTMU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11011, "fcvtmu">;
5390 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtmu,
5391                                                   FCVTMUss, FCVTMUdd>;
5392 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtmu, FCVTMUdd>;
5393
5394 defm FCVTAS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11100, "fcvtas">;
5395 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtas,
5396                                                   FCVTASss, FCVTASdd>;
5397 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtas, FCVTASdd>;
5398
5399 defm FCVTAU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11100, "fcvtau">;
5400 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtau,
5401                                                   FCVTAUss, FCVTAUdd>;
5402 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtau, FCVTAUdd>;
5403
5404 defm FCVTPS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11010, "fcvtps">;
5405 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtps,
5406                                                   FCVTPSss, FCVTPSdd>;
5407 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtps, FCVTPSdd>;
5408
5409 defm FCVTPU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11010, "fcvtpu">;
5410 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtpu,
5411                                                   FCVTPUss, FCVTPUdd>;
5412 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtpu, FCVTPUdd>;
5413
5414 defm FCVTZS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11011, "fcvtzs">;
5415 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzs,
5416                                                   FCVTZSss, FCVTZSdd>;
5417 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_aarch64_neon_vcvtzs,
5418                                                 FCVTZSdd>;
5419
5420 defm FCVTZU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11011, "fcvtzu">;
5421 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzu,
5422                                                   FCVTZUss, FCVTZUdd>;
5423 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_aarch64_neon_vcvtzu,
5424                                                 FCVTZUdd>;
5425
5426 // Patterns For Convert Instructions Between v1f64 and v1i64
5427 class Neon_Scalar2SameMisc_cvtf_v1f64_pattern<SDPatternOperator opnode,
5428                                               Instruction INST>
5429     : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn))), (INST FPR64:$Rn)>;
5430
5431 class Neon_Scalar2SameMisc_fcvt_v1f64_pattern<SDPatternOperator opnode,
5432                                               Instruction INST>
5433     : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
5434
5435 def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<sint_to_fp, SCVTFdd>;
5436 def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<uint_to_fp, UCVTFdd>;
5437
5438 def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_sint, FCVTZSdd>;
5439 def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_uint, FCVTZUdd>;
5440
5441 // Scalar Floating-point Reciprocal Estimate
5442 defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
5443 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpe,
5444                                              FRECPEss, FRECPEdd>;
5445 def : Neon_Scalar2SameMisc_V1_D_size_patterns<int_arm_neon_vrecpe,
5446                                               FRECPEdd>;
5447
5448 // Scalar Floating-point Reciprocal Exponent
5449 defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
5450 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpx,
5451                                              FRECPXss, FRECPXdd>;
5452
5453 // Scalar Floating-point Reciprocal Square Root Estimate
5454 defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
5455 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrsqrte,
5456                                              FRSQRTEss, FRSQRTEdd>;
5457 def : Neon_Scalar2SameMisc_V1_D_size_patterns<int_arm_neon_vrsqrte,
5458                                               FRSQRTEdd>;
5459
5460 // Scalar Floating-point Round
5461 class Neon_ScalarFloatRound_pattern<SDPatternOperator opnode, Instruction INST>
5462     : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
5463
5464 def : Neon_ScalarFloatRound_pattern<fceil, FRINTPdd>;
5465 def : Neon_ScalarFloatRound_pattern<ffloor, FRINTMdd>;
5466 def : Neon_ScalarFloatRound_pattern<ftrunc, FRINTZdd>;
5467 def : Neon_ScalarFloatRound_pattern<frint, FRINTXdd>;
5468 def : Neon_ScalarFloatRound_pattern<fnearbyint, FRINTIdd>;
5469 def : Neon_ScalarFloatRound_pattern<frnd, FRINTAdd>;
5470 def : Neon_ScalarFloatRound_pattern<int_aarch64_neon_frintn, FRINTNdd>;
5471
5472 // Scalar Integer Compare
5473
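// Each compare below writes an all-ones mask to the destination D register
// when the condition holds and zero otherwise.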
5474 // Scalar Compare Bitwise Equal
5475 def CMEQddd: NeonI_Scalar3Same_D_size<0b1, 0b10001, "cmeq">;
5476 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vceq, CMEQddd>;
5477
5478 class Neon_Scalar3Same_cmp_D_size_v1_patterns<SDPatternOperator opnode,
5479                                               Instruction INSTD,
5480                                               CondCode CC>
5481   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm), CC)),
5482         (INSTD FPR64:$Rn, FPR64:$Rm)>;
5483
5484 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMEQddd, SETEQ>;
5485
5486 // Scalar Compare Signed Greater Than Or Equal
5487 def CMGEddd: NeonI_Scalar3Same_D_size<0b0, 0b00111, "cmge">;
5488 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vcge, CMGEddd>;
5489 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGEddd, SETGE>;
5490
5491 // Scalar Compare Unsigned Higher Or Same
5492 def CMHSddd: NeonI_Scalar3Same_D_size<0b1, 0b00111, "cmhs">;
5493 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vchs, CMHSddd>;
5494 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHSddd, SETUGE>;
5495
5496 // Scalar Compare Unsigned Higher
5497 def CMHIddd: NeonI_Scalar3Same_D_size<0b1, 0b00110, "cmhi">;
5498 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vchi, CMHIddd>;
5499 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHIddd, SETUGT>;
5500
5501 // Scalar Compare Signed Greater Than
5502 def CMGTddd: NeonI_Scalar3Same_D_size<0b0, 0b00110, "cmgt">;
5503 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vcgt, CMGTddd>;
5504 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGTddd, SETGT>;
5505
5506 // Scalar Compare Bitwise Test Bits
5507 def CMTSTddd: NeonI_Scalar3Same_D_size<0b0, 0b10001, "cmtst">;
5508 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vtstd, CMTSTddd>;
5509 defm : Neon_Scalar3Same_D_size_patterns<Neon_tst, CMTSTddd>;
5510
5511 // Scalar Compare Bitwise Equal To Zero
5512 def CMEQddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01001, "cmeq">;
5513 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vceq,
5514                                                 CMEQddi>;
5515 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETEQ, CMEQddi>;
5516
5517 // Scalar Compare Signed Greater Than Or Equal To Zero
5518 def CMGEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01000, "cmge">;
5519 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcge,
5520                                                 CMGEddi>;
5521 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGE, CMGEddi>;
5522
5523 // Scalar Compare Signed Greater Than Zero
5524 def CMGTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01000, "cmgt">;
5525 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcgt,
5526                                                 CMGTddi>;
5527 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGT, CMGTddi>;
5528
5529 // Scalar Compare Signed Less Than Or Equal To Zero
5530 def CMLEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01001, "cmle">;
5531 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vclez,
5532                                                 CMLEddi>;
5533 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLE, CMLEddi>;
5534
5535 // Scalar Compare Signed Less Than Zero
5536 def CMLTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01010, "cmlt">;
5537 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcltz,
5538                                                 CMLTddi>;
5539 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLT, CMLTddi>;
5540
5541 // Scalar Floating-point Compare
5542
5543 // Scalar Floating-point Compare Mask Equal
5544 defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">;
5545 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fceq, v1i32, f32,
5546                                          FCMEQsss, v1i64, f64, FCMEQddd>;
5547 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETEQ, FCMEQddd>;
5548
5549 // Scalar Floating-point Compare Mask Equal To Zero
5550 defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
5551 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fceq, SETEQ,
5552                                                   FCMEQZssi, FCMEQZddi>;
5553
5554 // Scalar Floating-point Compare Mask Greater Than Or Equal
5555 defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
5556 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcge, v1i32, f32,
5557                                          FCMGEsss, v1i64, f64, FCMGEddd>;
5558 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGE, FCMGEddd>;
5559
5560 // Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
5561 defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">;
5562 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcge, SETGE,
5563                                                   FCMGEZssi, FCMGEZddi>;
5564
5565 // Scalar Floating-point Compare Mask Greater Than
5566 defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">;
5567 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcgt, v1i32, f32,
5568                                          FCMGTsss, v1i64, f64, FCMGTddd>;
5569 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGT, FCMGTddd>;
5570
5571 // Scalar Floating-point Compare Mask Greater Than Zero
5572 defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">;
5573 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcgt, SETGT,
5574                                                   FCMGTZssi, FCMGTZddi>;
5575
5576 // Scalar Floating-point Compare Mask Less Than Or Equal To Zero
5577 defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">;
5578 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fclez, SETLE,
5579                                                   FCMLEZssi, FCMLEZddi>;
5580
5581 // Scalar Floating-point Compare Mask Less Than Zero
5582 defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">;
5583 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcltz, SETLT,
5584                                                   FCMLTZssi, FCMLTZddi>;
5585
5586 // Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
5587 defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">;
5588 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcage, v1i32, f32,
5589                                          FACGEsss, v1i64, f64, FACGEddd>;
5590 def : Pat<(v1i64 (int_arm_neon_vacge (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5591           (FACGEddd FPR64:$Rn, FPR64:$Rm)>;
5592
5593 // Scalar Floating-point Absolute Compare Mask Greater Than
5594 defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">;
5595 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcagt, v1i32, f32,
5596                                          FACGTsss, v1i64, f64, FACGTddd>;
5597 def : Pat<(v1i64 (int_arm_neon_vacgt (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5598           (FACGTddd FPR64:$Rn, FPR64:$Rm)>;
5599
5600 // Scalar Floating-point Absolute Difference
5601 defm FABD: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11010, "fabd">;
5602 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vabd, f32, f32,
5603                                          FABDsss, f64, f64, FABDddd>;
5604
5605 // Scalar Absolute Value
5606 defm ABS : NeonI_Scalar2SameMisc_D_size<0b0, 0b01011, "abs">;
5607 defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vabs, ABSdd>;
5608
5609 // Scalar Signed Saturating Absolute Value
5610 defm SQABS : NeonI_Scalar2SameMisc_BHSD_size<0b0, 0b00111, "sqabs">;
5611 defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqabs,
5612                                                SQABSbb, SQABShh, SQABSss, SQABSdd>;
5613
5614 // Scalar Negate
5615 defm NEG : NeonI_Scalar2SameMisc_D_size<0b1, 0b01011, "neg">;
5616 defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vneg, NEGdd>;
5617
5618 // Scalar Signed Saturating Negate
5619 defm SQNEG : NeonI_Scalar2SameMisc_BHSD_size<0b1, 0b00111, "sqneg">;
5620 defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqneg,
5621                                                SQNEGbb, SQNEGhh, SQNEGss, SQNEGdd>;
5622
5623 // Scalar Signed Saturating Accumulate of Unsigned Value
5624 defm SUQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b0, 0b00011, "suqadd">;
5625 defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vuqadd,
5626                                                      SUQADDbb, SUQADDhh,
5627                                                      SUQADDss, SUQADDdd>;
5628
5629 // Scalar Unsigned Saturating Accumulate of Signed Value
5630 defm USQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b1, 0b00011, "usqadd">;
5631 defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vsqadd,
5632                                                      USQADDbb, USQADDhh,
5633                                                      USQADDss, USQADDdd>;
5634
5635 def : Pat<(v1i64 (int_aarch64_neon_suqadd (v1i64 FPR64:$Src),
5636                                           (v1i64 FPR64:$Rn))),
5637           (SUQADDdd FPR64:$Src, FPR64:$Rn)>;
5638
5639 def : Pat<(v1i64 (int_aarch64_neon_usqadd (v1i64 FPR64:$Src),
5640                                           (v1i64 FPR64:$Rn))),
5641           (USQADDdd FPR64:$Src, FPR64:$Rn)>;
5642
5643 def : Pat<(v1i64 (int_arm_neon_vabs (v1i64 FPR64:$Rn))),
5644           (ABSdd FPR64:$Rn)>;
5645
5646 def : Pat<(v1i64 (int_arm_neon_vqabs (v1i64 FPR64:$Rn))),
5647           (SQABSdd FPR64:$Rn)>;
5648
5649 def : Pat<(v1i64 (int_arm_neon_vqneg (v1i64 FPR64:$Rn))),
5650           (SQNEGdd FPR64:$Rn)>;
5651
5652 def : Pat<(v1i64 (sub (v1i64 (bitconvert (v8i8 Neon_AllZero))),
5653                       (v1i64 FPR64:$Rn))),
5654           (NEGdd FPR64:$Rn)>;
5655
5656 // Scalar Signed Saturating Extract Unsigned Narrow
5657 defm SQXTUN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10010, "sqxtun">;
5658 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnsu,
5659                                                      SQXTUNbh, SQXTUNhs,
5660                                                      SQXTUNsd>;
5661
5662 // Scalar Signed Saturating Extract Narrow
5663 defm SQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b0, 0b10100, "sqxtn">;
5664 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovns,
5665                                                      SQXTNbh, SQXTNhs,
5666                                                      SQXTNsd>;
5667
5668 // Scalar Unsigned Saturating Extract Narrow
5669 defm UQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10100, "uqxtn">;
5670 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnu,
5671                                                      UQXTNbh, UQXTNhs,
5672                                                      UQXTNsd>;
5673
5674 // Scalar Reduce Pairwise
5675
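// These reduce the two elements of a vector register to a single scalar,
// e.g. "addp d0, v1.2d" sums both 64-bit lanes of v1 and "faddp s0, v1.2s"
// sums both single-precision lanes.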
5676 multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
5677                                      string asmop, bit Commutable = 0> {
5678   let isCommutable = Commutable in {
5679     def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
5680                                 (outs FPR64:$Rd), (ins VPR128:$Rn),
5681                                 !strconcat(asmop, "\t$Rd, $Rn.2d"),
5682                                 [],
5683                                 NoItinerary>,
5684                 Sched<[WriteFPALU, ReadFPALU]>;
5685   }
5686 }
5687
5688 multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
5689                                      string asmop, bit Commutable = 0>
5690   : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
5691   let isCommutable = Commutable in {
5692     def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
5693                                 (outs FPR32:$Rd), (ins VPR64:$Rn),
5694                                 !strconcat(asmop, "\t$Rd, $Rn.2s"),
5695                                 [],
5696                                 NoItinerary>,
5697                 Sched<[WriteFPALU, ReadFPALU]>;
5698   }
5699 }
5700
5701 // Scalar Reduce Addition Pairwise (Integer) with
5702 // patterns to match the llvm.aarch64.* intrinsics below
5703 defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
5704
5705 // Pattern to match llvm.aarch64.* intrinsic for
5706 // Scalar Reduce Addition Pairwise (Integer)
5707 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
5708           (ADDPvv_D_2D VPR128:$Rn)>;
5709 def : Pat<(v1i64 (int_aarch64_neon_vaddv (v2i64 VPR128:$Rn))),
5710           (ADDPvv_D_2D VPR128:$Rn)>;
5711
5712 // Scalar Reduce Addition Pairwise (Floating Point)
5713 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
5714
5715 // Scalar Reduce Maximum Pairwise (Floating Point)
5716 defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
5717
5718 // Scalar Reduce Minimum Pairwise (Floating Point)
5719 defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
5720
5721 // Scalar Reduce maxNum Pairwise (Floating Point)
5722 defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
5723
5724 // Scalar Reduce minNum Pairwise (Floating Point)
5725 defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
5726
5727 multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnode,
5728                                             Instruction INSTS,
5729                                             Instruction INSTD> {
5730   def : Pat<(f32 (opnode (v2f32 VPR64:$Rn))),
5731             (INSTS VPR64:$Rn)>;
5732   def : Pat<(f64 (opnode (v2f64 VPR128:$Rn))),
5733             (INSTD VPR128:$Rn)>;
5734 }
5735
5736 // Patterns to match llvm.aarch64.* intrinsic for
5737 // Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
5738 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
5739                                         FADDPvv_S_2S, FADDPvv_D_2D>;
5740
5741 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
5742                                         FMAXPvv_S_2S, FMAXPvv_D_2D>;
5743
5744 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
5745                                         FMINPvv_S_2S, FMINPvv_D_2D>;
5746
5747 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
5748                                         FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
5749
5750 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
5751                                         FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
5752
5753 def : Pat<(f32 (int_aarch64_neon_vpfadd (v4f32 VPR128:$Rn))),
5754           (FADDPvv_S_2S (v2f32
5755                (EXTRACT_SUBREG
5756                    (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
5757                    sub_64)))>;
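// The v4f32 case above reduces in two steps: a vector FADDP first leaves the
// two partial sums in the low 64 bits, then the scalar FADDP adds that pair.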
5758
5759 // Scalar by element Arithmetic
5760
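// These by-element forms take the second operand from a single lane of a
// vector register: the register number goes in Inst{20-16}, while the lane
// index is carried by the H/L bits (Inst{11} and Inst{21}) set in each def.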
5761 class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,
5762                                     string rmlane, bit u, bit szhi, bit szlo,
5763                                     RegisterClass ResFPR, RegisterClass OpFPR,
5764                                     RegisterOperand OpVPR, Operand OpImm>
5765   : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
5766                              (outs ResFPR:$Rd),
5767                              (ins OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
5768                              asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
5769                              [],
5770                              NoItinerary>,
5771     Sched<[WriteFPMul, ReadFPMul, ReadFPMul]> {
5772   bits<3> Imm;
5773   bits<5> MRm;
5774 }
5775
5776 class NeonI_ScalarXIndexedElemArith_Constraint_Impl<string asmop, bits<4> opcode,
5777                                                     string rmlane,
5778                                                     bit u, bit szhi, bit szlo,
5779                                                     RegisterClass ResFPR,
5780                                                     RegisterClass OpFPR,
5781                                                     RegisterOperand OpVPR,
5782                                                     Operand OpImm>
5783   : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
5784                              (outs ResFPR:$Rd),
5785                              (ins ResFPR:$src, OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
5786                              asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
5787                              [],
5788                              NoItinerary>,
5789     Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
5790   let Constraints = "$src = $Rd";
5791   bits<3> Imm;
5792   bits<5> MRm;
5793 }
5794
5795 // Scalar Floating Point multiply (scalar, by element)
5796 def FMULssv_4S : NeonI_ScalarXIndexedElemArith<"fmul",
5797   0b1001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5798   let Inst{11} = Imm{1}; // h
5799   let Inst{21} = Imm{0}; // l
5800   let Inst{20-16} = MRm;
5801 }
5802 def FMULddv_2D : NeonI_ScalarXIndexedElemArith<"fmul",
5803   0b1001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5804   let Inst{11} = Imm{0}; // h
5805   let Inst{21} = 0b0;    // l
5806   let Inst{20-16} = MRm;
5807 }
5808
5809 // Scalar Floating Point multiply extended (scalar, by element)
5810 def FMULXssv_4S : NeonI_ScalarXIndexedElemArith<"fmulx",
5811   0b1001, ".s", 0b1, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5812   let Inst{11} = Imm{1}; // h
5813   let Inst{21} = Imm{0}; // l
5814   let Inst{20-16} = MRm;
5815 }
5816 def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
5817   0b1001, ".d", 0b1, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5818   let Inst{11} = Imm{0}; // h
5819   let Inst{21} = 0b0;    // l
5820   let Inst{20-16} = MRm;
5821 }
5822
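// Pattern helper for fmul/fmulx by element.  It handles a lane taken from a
// 128-bit vector directly, a lane taken from a 64-bit vector (widened first
// with SUBREG_TO_REG), and the commuted form of each.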
5823 multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
5824   SDPatternOperator opnode,
5825   Instruction INST,
5826   ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
5827   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
5828
5829   def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
5830                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
5831              (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5832
5833   def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
5834                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
5835              (ResTy (INST (ResTy FPRC:$Rn),
5836                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5837                OpNImm:$Imm))>;
5838
5839   // swapped operands
5840   def  : Pat<(ResTy (opnode
5841                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5842                (ResTy FPRC:$Rn))),
5843              (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5844
5845   def  : Pat<(ResTy (opnode
5846                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5847                (ResTy FPRC:$Rn))),
5848              (ResTy (INST (ResTy FPRC:$Rn),
5849                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5850                OpNImm:$Imm))>;
5851 }
5852
5853 // Patterns for Scalar Floating Point multiply (scalar, by element)
5854 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
5855   f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
5856 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
5857   f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
5858
5859 // Patterns for Scalar Floating Point multiply extended (scalar, by element)
5860 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
5861   FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare,
5862   v2f32, v4f32, neon_uimm1_bare>;
5863 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
5864   FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare,
5865   v1f64, v2f64, neon_uimm0_bare>;
5866
5867 // Scalar Floating Point fused multiply-add (scalar, by element)
5868 def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
5869   0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5870   let Inst{11} = Imm{1}; // h
5871   let Inst{21} = Imm{0}; // l
5872   let Inst{20-16} = MRm;
5873 }
5874 def FMLAddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
5875   0b0001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5876   let Inst{11} = Imm{0}; // h
5877   let Inst{21} = 0b0;    // l
5878   let Inst{20-16} = MRm;
5879 }
5880
5881 // Scalar Floating Point fused multiply-subtract (scalar, by element)
5882 def FMLSssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
5883   0b0101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5884   let Inst{11} = Imm{1}; // h
5885   let Inst{21} = Imm{0}; // l
5886   let Inst{20-16} = MRm;
5887 }
5888 def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
5889   0b0101, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5890   let Inst{11} = Imm{0}; // h
5891   let Inst{21} = 0b0;    // l
5892   let Inst{20-16} = MRm;
5893 }
5894 // We are allowed to match the fma node regardless of compile options.
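// fmls has no separate node: it is matched as fma with fneg applied to the
// multiplicand, in either operand order, as the patterns below show.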
5895 multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
5896   Instruction FMLAI, Instruction FMLSI,
5897   ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
5898   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
5899   // fmla
5900   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5901                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5902                (ResTy FPRC:$Ra))),
5903              (ResTy (FMLAI (ResTy FPRC:$Ra),
5904                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5905
5906   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5907                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5908                (ResTy FPRC:$Ra))),
5909              (ResTy (FMLAI (ResTy FPRC:$Ra),
5910                (ResTy FPRC:$Rn),
5911                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5912                OpNImm:$Imm))>;
5913
5914   // swapped fmla operands
5915   def  : Pat<(ResTy (fma
5916                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5917                (ResTy FPRC:$Rn),
5918                (ResTy FPRC:$Ra))),
5919              (ResTy (FMLAI (ResTy FPRC:$Ra),
5920                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5921
5922   def  : Pat<(ResTy (fma
5923                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5924                (ResTy FPRC:$Rn),
5925                (ResTy FPRC:$Ra))),
5926              (ResTy (FMLAI (ResTy FPRC:$Ra),
5927                (ResTy FPRC:$Rn),
5928                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5929                OpNImm:$Imm))>;
5930
5931   // fmls
5932   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5933                (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
5934                (ResTy FPRC:$Ra))),
5935              (ResTy (FMLSI (ResTy FPRC:$Ra),
5936                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5937
5938   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5939                (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
5940                (ResTy FPRC:$Ra))),
5941              (ResTy (FMLSI (ResTy FPRC:$Ra),
5942                (ResTy FPRC:$Rn),
5943                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5944                OpNImm:$Imm))>;
5945
5946   // swapped fmls operands
5947   def  : Pat<(ResTy (fma
5948                (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
5949                (ResTy FPRC:$Rn),
5950                (ResTy FPRC:$Ra))),
5951              (ResTy (FMLSI (ResTy FPRC:$Ra),
5952                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5953
5954   def  : Pat<(ResTy (fma
5955                (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
5956                (ResTy FPRC:$Rn),
5957                (ResTy FPRC:$Ra))),
5958              (ResTy (FMLSI (ResTy FPRC:$Ra),
5959                (ResTy FPRC:$Rn),
5960                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5961                OpNImm:$Imm))>;
5962 }
5963
5964 // Scalar Floating Point fused multiply-add and
5965 // multiply-subtract (scalar, by element)
5966 defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
5967   f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
5968 defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
5969   f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
5972
5973 // Scalar Signed saturating doubling multiply long (scalar, by element)
5974 def SQDMULLshv_4H : NeonI_ScalarXIndexedElemArith<"sqdmull",
5975   0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
5976   let Inst{11} = 0b0; // h
5977   let Inst{21} = Imm{1}; // l
5978   let Inst{20} = Imm{0}; // m
5979   let Inst{19-16} = MRm{3-0};
5980 }
5981 def SQDMULLshv_8H : NeonI_ScalarXIndexedElemArith<"sqdmull",
5982   0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
5983   let Inst{11} = Imm{2}; // h
5984   let Inst{21} = Imm{1}; // l
5985   let Inst{20} = Imm{0}; // m
5986   let Inst{19-16} = MRm{3-0};
5987 }
5988 def SQDMULLdsv_2S : NeonI_ScalarXIndexedElemArith<"sqdmull",
5989   0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
5990   let Inst{11} = 0b0;    // h
5991   let Inst{21} = Imm{0}; // l
5992   let Inst{20-16} = MRm;
5993 }
5994 def SQDMULLdsv_4S : NeonI_ScalarXIndexedElemArith<"sqdmull",
5995   0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
5996   let Inst{11} = Imm{1};    // h
5997   let Inst{21} = Imm{0};    // l
5998   let Inst{20-16} = MRm;
5999 }
6000
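// Integer by-element multiply helper.  Because the scalar operands are v1
// vector types, the selected lane can appear either as a scalar_to_vector of
// a vector_extract or as an extract_subvector; both shapes are matched, in
// both operand orders.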
6001 multiclass Neon_ScalarXIndexedElem_MUL_Patterns<
6002   SDPatternOperator opnode,
6003   Instruction INST,
6004   ValueType ResTy, RegisterClass FPRC,
6005   ValueType OpVTy, ValueType OpTy,
6006   ValueType VecOpTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
6007
6008   def  : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
6009                (OpVTy (scalar_to_vector
6010                  (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))))),
6011              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
6012
6013   def  : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
6014                (OpVTy (extract_subvector (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
6015              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
6016
6017   // swapped operands
6018   def  : Pat<(ResTy (opnode
6019                (OpVTy (scalar_to_vector
6020                  (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
6021                  (OpVTy FPRC:$Rn))),
6022              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
6023
6024   def  : Pat<(ResTy (opnode
6025                (OpVTy (extract_subvector (VecOpTy VPRC:$MRm), OpImm:$Imm)),
6026                (OpVTy FPRC:$Rn))),
6027              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
6028 }
6029
6030
6031 // Patterns for Scalar Signed saturating doubling
6032 // multiply long (scalar, by element)
6033 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
6034   SQDMULLshv_4H, v1i32, FPR16, v1i16, i16, v4i16,
6035   i32, VPR64Lo, neon_uimm2_bare>;
6036 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
6037   SQDMULLshv_8H, v1i32, FPR16, v1i16, i16, v8i16,
6038   i32, VPR128Lo, neon_uimm3_bare>;
6039 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
6040   SQDMULLdsv_2S, v1i64, FPR32, v1i32, i32, v2i32,
6041   i32, VPR64Lo, neon_uimm1_bare>;
6042 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
6043   SQDMULLdsv_4S, v1i64, FPR32, v1i32, i32, v4i32,
6044   i32, VPR128Lo, neon_uimm2_bare>;
6045
6046 // Scalar Signed saturating doubling multiply-add long (scalar, by element)
6047 def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
6048   0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
6049   let Inst{11} = 0b0; // h
6050   let Inst{21} = Imm{1}; // l
6051   let Inst{20} = Imm{0}; // m
6052   let Inst{19-16} = MRm{3-0};
6053 }
6054 def SQDMLALshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
6055   0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
6056   let Inst{11} = Imm{2}; // h
6057   let Inst{21} = Imm{1}; // l
6058   let Inst{20} = Imm{0}; // m
6059   let Inst{19-16} = MRm{3-0};
6060 }
6061 def SQDMLALdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
6062   0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
6063   let Inst{11} = 0b0;    // h
6064   let Inst{21} = Imm{0}; // l
6065   let Inst{20-16} = MRm;
6066 }
6067 def SQDMLALdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
6068   0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
6069   let Inst{11} = Imm{1};    // h
6070   let Inst{21} = Imm{0};    // l
6071   let Inst{20-16} = MRm;
6072 }
6073
6074 // Scalar Signed saturating doubling
6075 // multiply-subtract long (scalar, by element)
6076 def SQDMLSLshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
6077   0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
6078   let Inst{11} = 0b0; // h
6079   let Inst{21} = Imm{1}; // l
6080   let Inst{20} = Imm{0}; // m
6081   let Inst{19-16} = MRm{3-0};
6082 }
6083 def SQDMLSLshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
6084   0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
6085   let Inst{11} = Imm{2}; // h
6086   let Inst{21} = Imm{1}; // l
6087   let Inst{20} = Imm{0}; // m
6088   let Inst{19-16} = MRm{3-0};
6089 }
6090 def SQDMLSLdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
6091   0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
6092   let Inst{11} = 0b0;    // h
6093   let Inst{21} = Imm{0}; // l
6094   let Inst{20-16} = MRm;
6095 }
6096 def SQDMLSLdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
6097   0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
6098   let Inst{11} = Imm{1};    // h
6099   let Inst{21} = Imm{0};    // l
6100   let Inst{20-16} = MRm;
6101 }
6102
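// Multiply-accumulate-long helper: an outer saturating add/subtract (opnode)
// wrapped around a doubling multiply-long (coreopnode) is folded into a
// single accumulating by-element instruction.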
6103 multiclass Neon_ScalarXIndexedElem_MLAL_Patterns<
6104   SDPatternOperator opnode,
6105   SDPatternOperator coreopnode,
6106   Instruction INST,
6107   ValueType ResTy, RegisterClass ResFPRC, RegisterClass FPRC,
6108   ValueType OpTy,
6109   ValueType OpVTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
6110
6111   def  : Pat<(ResTy (opnode
6112                (ResTy ResFPRC:$Ra),
6113                (ResTy (coreopnode (OpTy FPRC:$Rn),
6114                  (OpTy (scalar_to_vector
6115                    (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))))))),
6116              (ResTy (INST (ResTy ResFPRC:$Ra),
6117                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
6118
6119   def  : Pat<(ResTy (opnode
6120                (ResTy ResFPRC:$Ra),
6121                (ResTy (coreopnode (OpTy FPRC:$Rn),
6122                  (OpTy (extract_subvector (OpVTy VPRC:$MRm), OpImm:$Imm)))))),
6123              (ResTy (INST (ResTy ResFPRC:$Ra),
6124                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
6125
6126   // swapped operands
6127   def  : Pat<(ResTy (opnode
6128                (ResTy ResFPRC:$Ra),
6129                (ResTy (coreopnode
6130                  (OpTy (scalar_to_vector
6131                    (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))),
6132                  (OpTy FPRC:$Rn))))),
6133              (ResTy (INST (ResTy ResFPRC:$Ra),
6134                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
6135
6136   def  : Pat<(ResTy (opnode
6137                (ResTy ResFPRC:$Ra),
6138                (ResTy (coreopnode
6139                  (OpTy (extract_subvector (OpVTy VPRC:$MRm), OpImm:$Imm)),
6140                  (OpTy FPRC:$Rn))))),
6141              (ResTy (INST (ResTy ResFPRC:$Ra),
6142                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
6143 }
6144
6145 // Patterns for Scalar Signed saturating
6146 // doubling multiply-add long (scalar, by element)
6147 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
6148   int_arm_neon_vqdmull, SQDMLALshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
6149   i32, VPR64Lo, neon_uimm2_bare>;
6150 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
6151   int_arm_neon_vqdmull, SQDMLALshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
6152   i32, VPR128Lo, neon_uimm3_bare>;
6153 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
6154   int_arm_neon_vqdmull, SQDMLALdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
6155   i32, VPR64Lo, neon_uimm1_bare>;
6156 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
6157   int_arm_neon_vqdmull, SQDMLALdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
6158   i32, VPR128Lo, neon_uimm2_bare>;
6159
6160 // Patterns for Scalar Signed saturating
6161 // doubling multiply-subtract long (scalar, by element)
6162 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
6163   int_arm_neon_vqdmull, SQDMLSLshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
6164   i32, VPR64Lo, neon_uimm2_bare>;
6165 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
6166   int_arm_neon_vqdmull, SQDMLSLshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
6167   i32, VPR128Lo, neon_uimm3_bare>;
6168 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
6169   int_arm_neon_vqdmull, SQDMLSLdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
6170   i32, VPR64Lo, neon_uimm1_bare>;
6171 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
6172   int_arm_neon_vqdmull, SQDMLSLdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
6173   i32, VPR128Lo, neon_uimm2_bare>;
6174
6175 // Scalar Signed saturating doubling multiply returning
6176 // high half (scalar, by element)
6177 def SQDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
6178   0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
6179   let Inst{11} = 0b0; // h
6180   let Inst{21} = Imm{1}; // l
6181   let Inst{20} = Imm{0}; // m
6182   let Inst{19-16} = MRm{3-0};
6183 }
6184 def SQDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
6185   0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
6186   let Inst{11} = Imm{2}; // h
6187   let Inst{21} = Imm{1}; // l
6188   let Inst{20} = Imm{0}; // m
6189   let Inst{19-16} = MRm{3-0};
6190 }
6191 def SQDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
6192   0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
6193   let Inst{11} = 0b0;    // h
6194   let Inst{21} = Imm{0}; // l
6195   let Inst{20-16} = MRm;
6196 }
6197 def SQDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
6198   0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
6199   let Inst{11} = Imm{1};    // h
6200   let Inst{21} = Imm{0};    // l
6201   let Inst{20-16} = MRm;
6202 }
6203
6204 // Patterns for Scalar Signed saturating doubling multiply returning
6205 // high half (scalar, by element)
6206 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
6207   SQDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16,
6208   i32, VPR64Lo, neon_uimm2_bare>;
6209 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
6210   SQDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16,
6211   i32, VPR128Lo, neon_uimm3_bare>;
6212 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
6213   SQDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32,
6214   i32, VPR64Lo, neon_uimm1_bare>;
6215 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
6216   SQDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32,
6217   i32, VPR128Lo, neon_uimm2_bare>;
6218
6219 // Scalar Signed saturating rounding doubling multiply
6220 // returning high half (scalar, by element)
6221 def SQRDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
6222   0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
6223   let Inst{11} = 0b0; // h
6224   let Inst{21} = Imm{1}; // l
6225   let Inst{20} = Imm{0}; // m
6226   let Inst{19-16} = MRm{3-0};
6227 }
6228 def SQRDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
6229   0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
6230   let Inst{11} = Imm{2}; // h
6231   let Inst{21} = Imm{1}; // l
6232   let Inst{20} = Imm{0}; // m
6233   let Inst{19-16} = MRm{3-0};
6234 }
6235 def SQRDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
6236   0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
6237   let Inst{11} = 0b0;    // h
6238   let Inst{21} = Imm{0}; // l
6239   let Inst{20-16} = MRm;
6240 }
6241 def SQRDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
6242   0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
6243   let Inst{11} = Imm{1};    // h
6244   let Inst{21} = Imm{0};    // l
6245   let Inst{20-16} = MRm;
6246 }
6247
6248 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
6249   SQRDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16, i32,
6250   VPR64Lo, neon_uimm2_bare>;
6251 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
6252   SQRDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16, i32,
6253   VPR128Lo, neon_uimm3_bare>;
6254 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
6255   SQRDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32, i32,
6256   VPR64Lo, neon_uimm1_bare>;
6257 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
6258   SQRDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32, i32,
6259   VPR128Lo, neon_uimm2_bare>;
6260
6261 // Scalar general arithmetic operation
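// (The 2D/3D/4D suffix appears to count the total number of operands, result
// included: unary, binary and ternary v1f64 operations that map directly onto
// the plain scalar floating-point instructions.)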
6262 class Neon_Scalar_GeneralMath2D_pattern<SDPatternOperator opnode,
6263                                         Instruction INST> 
6264     : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
6265
6266 class Neon_Scalar_GeneralMath3D_pattern<SDPatternOperator opnode,
6267                                         Instruction INST> 
6268     : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
6269           (INST FPR64:$Rn, FPR64:$Rm)>;
6270
6271 class Neon_Scalar_GeneralMath4D_pattern<SDPatternOperator opnode,
6272                                         Instruction INST> 
6273     : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm),
6274               (v1f64 FPR64:$Ra))),
6275           (INST FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
6276
6277 def : Neon_Scalar_GeneralMath3D_pattern<fadd, FADDddd>;
6278 def : Neon_Scalar_GeneralMath3D_pattern<fmul, FMULddd>;
6279 def : Neon_Scalar_GeneralMath3D_pattern<fsub, FSUBddd>;
6280 def : Neon_Scalar_GeneralMath3D_pattern<fdiv, FDIVddd>;
6281 def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vabds, FABDddd>;
6282 def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmaxs, FMAXddd>;
6283 def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmins, FMINddd>;
6284 def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vmaxnm, FMAXNMddd>;
6285 def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vminnm, FMINNMddd>;
6286
6287 def : Neon_Scalar_GeneralMath2D_pattern<fabs, FABSdd>;
6288 def : Neon_Scalar_GeneralMath2D_pattern<fneg, FNEGdd>;
6289
6290 def : Neon_Scalar_GeneralMath4D_pattern<fma, FMADDdddd>;
6291 def : Neon_Scalar_GeneralMath4D_pattern<fmsub, FMSUBdddd>;
6292
6293 // Scalar Copy - DUP element to scalar
6294 class NeonI_Scalar_DUP<string asmop, string asmlane,
6295                        RegisterClass ResRC, RegisterOperand VPRC,
6296                        Operand OpImm>
6297   : NeonI_ScalarCopy<(outs ResRC:$Rd), (ins VPRC:$Rn, OpImm:$Imm),
6298                      asmop # "\t$Rd, $Rn." # asmlane # "[$Imm]",
6299                      [],
6300                      NoItinerary>,
6301     Sched<[WriteFPALU, ReadFPALU]> {
6302   bits<4> Imm;
6303 }
6304
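// Inst{20-16} encodes both the element size and the lane: the index bits are
// followed by a single set bit whose position selects b/h/s/d
// (xxxx1, xxx10, xx100, x1000).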
6305 def DUPbv_B : NeonI_Scalar_DUP<"dup", "b", FPR8, VPR128, neon_uimm4_bare> {
6306   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6307 }
6308 def DUPhv_H : NeonI_Scalar_DUP<"dup", "h", FPR16, VPR128, neon_uimm3_bare> {
6309   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6310 }
6311 def DUPsv_S : NeonI_Scalar_DUP<"dup", "s", FPR32, VPR128, neon_uimm2_bare> {
6312   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6313 }
6314 def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
6315   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
6316 }
6317
6318 def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 0)),
6319           (f32 (EXTRACT_SUBREG (v4f32 VPR128:$Rn), sub_32))>;
6320 def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 1)),
6321           (f32 (DUPsv_S (v4f32 VPR128:$Rn), 1))>;
6322 def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 2)),
6323           (f32 (DUPsv_S (v4f32 VPR128:$Rn), 2))>;
6324 def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 3)),
6325           (f32 (DUPsv_S (v4f32 VPR128:$Rn), 3))>;
6326
6327 def : Pat<(f64 (vector_extract (v2f64 VPR128:$Rn), 0)),
6328           (f64 (EXTRACT_SUBREG (v2f64 VPR128:$Rn), sub_64))>;
6329 def : Pat<(f64 (vector_extract (v2f64 VPR128:$Rn), 1)),
6330           (f64 (DUPdv_D (v2f64 VPR128:$Rn), 1))>;
6331
6332 def : Pat<(f32 (vector_extract (v2f32 VPR64:$Rn), 0)),
6333           (f32 (EXTRACT_SUBREG (v2f32 VPR64:$Rn), sub_32))>;
6334 def : Pat<(f32 (vector_extract (v2f32 VPR64:$Rn), 1)),
6335           (f32 (DUPsv_S (v4f32 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6336             1))>;
6337
6338 def : Pat<(f64 (vector_extract (v1f64 VPR64:$Rn), 0)),
6339           (f64 (EXTRACT_SUBREG (v1f64 VPR64:$Rn), sub_64))>;
6340
6341 multiclass NeonI_Scalar_DUP_Ext_Vec_pattern<Instruction DUPI,
6342   ValueType ResTy, ValueType OpTy,Operand OpLImm,
6343   ValueType NOpTy, ValueType ExTy, Operand OpNImm> {
6344
6345   def : Pat<(ResTy (extract_subvector (OpTy VPR128:$Rn), OpLImm:$Imm)),
6346             (ResTy (DUPI VPR128:$Rn, OpLImm:$Imm))>;
6347
6348   def : Pat<(ResTy (extract_subvector (NOpTy VPR64:$Rn), OpNImm:$Imm)),
6349             (ResTy (DUPI
6350               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6351                 OpNImm:$Imm))>;
6352 }
6353
6354 // Patterns for extracting v1ix subvectors using the scalar DUP instructions.
6355 defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPbv_B, v1i8, v16i8, neon_uimm4_bare,
6356                                         v8i8, v16i8, neon_uimm3_bare>;
6357 defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPhv_H, v1i16, v8i16, neon_uimm3_bare,
6358                                         v4i16, v8i16, neon_uimm2_bare>;
6359 defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPsv_S, v1i32, v4i32, neon_uimm2_bare,
6360                                         v2i32, v4i32, neon_uimm1_bare>;
6361
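// Copy-pattern helpers: pattern1 matches inserting an extracted lane into
// element 0 of an undef vector, pattern2 matches scalar_to_vector of an
// extracted lane.  Both select to a lane-indexed scalar DUP, widening 64-bit
// sources with SUBREG_TO_REG first.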
6362 multiclass NeonI_Scalar_DUP_Copy_pattern1<Instruction DUPI, ValueType ResTy,
6363                                           ValueType OpTy, ValueType ElemTy,
6364                                           Operand OpImm, ValueType OpNTy,
6365                                           ValueType ExTy, Operand OpNImm> {
6366
6367   def : Pat<(ResTy (vector_insert (ResTy undef),
6368               (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
6369               (neon_uimm0_bare:$Imm))),
6370             (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
6371
6372   def : Pat<(ResTy (vector_insert (ResTy undef),
6373               (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
6374               (OpNImm:$Imm))),
6375             (ResTy (DUPI
6376               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6377               OpNImm:$Imm))>;
6378 }
6379
6380 multiclass NeonI_Scalar_DUP_Copy_pattern2<Instruction DUPI, ValueType ResTy,
6381                                           ValueType OpTy, ValueType ElemTy,
6382                                           Operand OpImm, ValueType OpNTy,
6383                                           ValueType ExTy, Operand OpNImm> {
6384
6385   def : Pat<(ResTy (scalar_to_vector
6386               (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)))),
6387             (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
6388
6389   def : Pat<(ResTy (scalar_to_vector
6390               (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)))),
6391             (ResTy (DUPI
6392               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6393               OpNImm:$Imm))>;
6394 }
6395
6396 // Patterns for copying an element into a v1ix vector using the scalar DUP
6397 // instructions.
6398 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPdv_D,
6399   v1i64, v2i64, i64, neon_uimm1_bare,
6400   v1i64, v2i64, neon_uimm0_bare>;
6401 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPsv_S,
6402   v1i32, v4i32, i32, neon_uimm2_bare,
6403   v2i32, v4i32, neon_uimm1_bare>;
6404 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPhv_H,
6405   v1i16, v8i16, i32, neon_uimm3_bare,
6406   v4i16, v8i16, neon_uimm2_bare>;
6407 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPbv_B,
6408   v1i8, v16i8, i32, neon_uimm4_bare,
6409   v8i8, v16i8, neon_uimm3_bare>;
6410 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
6411   v1i64, v2i64, i64, neon_uimm1_bare,
6412   v1i64, v2i64, neon_uimm0_bare>;
6413 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPsv_S,
6414   v1i32, v4i32, i32, neon_uimm2_bare,
6415   v2i32, v4i32, neon_uimm1_bare>;
6416 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPhv_H,
6417   v1i16, v8i16, i32, neon_uimm3_bare,
6418   v4i16, v8i16, neon_uimm2_bare>;
6419 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPbv_B,
6420   v1i8, v16i8, i32, neon_uimm4_bare,
6421   v8i8, v16i8, neon_uimm3_bare>;
6422
6423 multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
6424                                   Instruction DUPI, Operand OpImm,
6425                                   RegisterClass ResRC> {
6426   def : NeonInstAlias<!strconcat(asmop, "$Rd, $Rn" # asmlane # "[$Imm]"),
6427           (DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
6428 }
6429
6430 // Aliases for Scalar copy - DUP element (scalar)
6431 // FIXME: This is actually the preferred syntax but TableGen can't deal with
6432 // custom printing of aliases.
6433 defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
6434 defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
6435 defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
6436 defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
6437
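// Splitting a 128-bit vector: the low half is a plain sub-register extract,
// while the high half is materialised with a D-lane DUP of lane 1.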
6438 multiclass NeonI_SDUP<PatFrag GetLow, PatFrag GetHigh, ValueType ResTy,
6439                       ValueType OpTy> {
6440   def : Pat<(ResTy (GetLow VPR128:$Rn)),
6441             (ResTy (EXTRACT_SUBREG (OpTy VPR128:$Rn), sub_64))>;
6442   def : Pat<(ResTy (GetHigh VPR128:$Rn)),
6443             (ResTy (DUPdv_D (OpTy VPR128:$Rn), 1))>;
6444 }
6445
6446 defm : NeonI_SDUP<Neon_Low16B, Neon_High16B, v8i8, v16i8>;
6447 defm : NeonI_SDUP<Neon_Low8H, Neon_High8H, v4i16, v8i16>;
6448 defm : NeonI_SDUP<Neon_Low4S, Neon_High4S, v2i32, v4i32>;
6449 defm : NeonI_SDUP<Neon_Low2D, Neon_High2D, v1i64, v2i64>;
6450 defm : NeonI_SDUP<Neon_Low4float, Neon_High4float, v2f32, v4f32>;
6451 defm : NeonI_SDUP<Neon_Low2double, Neon_High2double, v1f64, v2f64>;
6452
6453 // The following patterns implement one-step sext/zext between v1 types
// (v1i8->v1i16, v1i16->v1i32, v1i32->v1i64) via a shift-left-long by #0.
6454 multiclass NeonI_ext<string prefix, SDNode ExtOp> {
6455   // v1i32 -> v1i64
6456   def : Pat<(v1i64 (ExtOp (v1i32 FPR32:$Rn))),
6457             (EXTRACT_SUBREG 
6458               (v2i64 (!cast<Instruction>(prefix # "_2S")
6459                 (v2i32 (SUBREG_TO_REG (i64 0), $Rn, sub_32)), 0)),
6460               sub_64)>;
6461   
6462   // v1i16 -> v1i32
6463   def : Pat<(v1i32 (ExtOp (v1i16 FPR16:$Rn))),
6464             (EXTRACT_SUBREG 
6465               (v4i32 (!cast<Instruction>(prefix # "_4H")
6466                 (v4i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)), 0)),
6467               sub_32)>;
6468   
6469   // v1i8 -> v1i16
6470   def : Pat<(v1i16 (ExtOp (v1i8 FPR8:$Rn))),
6471             (EXTRACT_SUBREG 
6472               (v8i16 (!cast<Instruction>(prefix # "_8B")
6473                 (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
6474               sub_16)>;
6475 }
6476
6477 defm NeonI_zext : NeonI_ext<"USHLLvvi", zext>;
6478 defm NeonI_sext : NeonI_ext<"SSHLLvvi", sext>;
6479
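// The wider zero-extensions rely on the scalar DUP writing only the low
// element and zeroing the rest of the vector register, so after
// SUBREG_TO_REG the value is already zero-extended.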
6480 // zext v1i8 -> v1i32
6481 def : Pat<(v1i32 (zext (v1i8 FPR8:$Rn))),
6482           (v1i32 (EXTRACT_SUBREG
6483             (v1i64 (SUBREG_TO_REG (i64 0),
6484               (v1i8 (DUPbv_B
6485                 (v16i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)),
6486                 0)),
6487               sub_8)),
6488             sub_32))>;
6489
6490 // zext v1i8 -> v1i64
6491 def : Pat<(v1i64 (zext (v1i8 FPR8:$Rn))),
6492           (v1i64 (SUBREG_TO_REG (i64 0),
6493             (v1i8 (DUPbv_B
6494               (v16i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)),
6495               0)),
6496             sub_8))>;
6497
6498 // zext v1i16 -> v1i64
6499 def : Pat<(v1i64 (zext (v1i16 FPR16:$Rn))),
6500           (v1i64 (SUBREG_TO_REG (i64 0),
6501             (v1i16 (DUPhv_H
6502               (v8i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)),
6503               0)),
6504             sub_16))>;
6505
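// Multi-step sign extensions chain SSHLL-by-#0 widenings, extracting the
// scalar sub-register between steps until the requested width is reached.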
6506 // sext v1i8 -> v1i32
6507 def : Pat<(v1i32 (sext (v1i8 FPR8:$Rn))),
6508           (EXTRACT_SUBREG
6509             (v4i32 (SSHLLvvi_4H
6510               (v4i16 (SUBREG_TO_REG (i64 0),
6511                 (v1i16 (EXTRACT_SUBREG 
6512                   (v8i16 (SSHLLvvi_8B
6513                     (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
6514                   sub_16)),
6515                 sub_16)), 0)),
6516             sub_32)>;
6517               
6518 // sext v1i8 -> v1i64
6519 def : Pat<(v1i64 (sext (v1i8 FPR8:$Rn))),
6520           (EXTRACT_SUBREG 
6521             (v2i64 (SSHLLvvi_2S
6522               (v2i32 (SUBREG_TO_REG (i64 0),
6523                 (v1i32 (EXTRACT_SUBREG
6524                   (v4i32 (SSHLLvvi_4H
6525                     (v4i16 (SUBREG_TO_REG (i64 0),
6526                       (v1i16 (EXTRACT_SUBREG 
6527                         (v8i16 (SSHLLvvi_8B
6528                           (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
6529                         sub_16)),
6530                       sub_16)), 0)),
6531                   sub_32)),
6532                 sub_32)), 0)),
6533             sub_64)>;
6534
6535   
6536 // sext v1i16 -> v1i64
6537 def : Pat<(v1i64 (sext (v1i16 FPR16:$Rn))),
6538           (EXTRACT_SUBREG
6539             (v2i64 (SSHLLvvi_2S
6540               (v2i32 (SUBREG_TO_REG (i64 0),
6541                 (v1i32 (EXTRACT_SUBREG 
6542                   (v4i32 (SSHLLvvi_4H
6543                     (v4i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)), 0)),
6544                   sub_32)),
6545                 sub_32)), 0)),
6546             sub_64)>;
6547
6548 //===----------------------------------------------------------------------===//
6549 // Non-Instruction Patterns
6550 //===----------------------------------------------------------------------===//
6551
6552 // 64-bit vector bitcasts...
6553
6554 def : Pat<(v1i64 (bitconvert (v8i8  VPR64:$src))), (v1i64 VPR64:$src)>;
6555 def : Pat<(v2f32 (bitconvert (v8i8  VPR64:$src))), (v2f32 VPR64:$src)>;
6556 def : Pat<(v2i32 (bitconvert (v8i8  VPR64:$src))), (v2i32 VPR64:$src)>;
6557 def : Pat<(v4i16 (bitconvert (v8i8  VPR64:$src))), (v4i16 VPR64:$src)>;
6558
6559 def : Pat<(v1i64 (bitconvert (v4i16  VPR64:$src))), (v1i64 VPR64:$src)>;
6560 def : Pat<(v2i32 (bitconvert (v4i16  VPR64:$src))), (v2i32 VPR64:$src)>;
6561 def : Pat<(v2f32 (bitconvert (v4i16  VPR64:$src))), (v2f32 VPR64:$src)>;
6562 def : Pat<(v8i8  (bitconvert (v4i16  VPR64:$src))), (v8i8 VPR64:$src)>;
6563
6564 def : Pat<(v1i64 (bitconvert (v2i32  VPR64:$src))), (v1i64 VPR64:$src)>;
6565 def : Pat<(v2f32 (bitconvert (v2i32  VPR64:$src))), (v2f32 VPR64:$src)>;
6566 def : Pat<(v4i16 (bitconvert (v2i32  VPR64:$src))), (v4i16 VPR64:$src)>;
6567 def : Pat<(v8i8  (bitconvert (v2i32  VPR64:$src))), (v8i8 VPR64:$src)>;
6568
6569 def : Pat<(v1i64 (bitconvert (v2f32  VPR64:$src))), (v1i64 VPR64:$src)>;
6570 def : Pat<(v2i32 (bitconvert (v2f32  VPR64:$src))), (v2i32 VPR64:$src)>;
6571 def : Pat<(v4i16 (bitconvert (v2f32  VPR64:$src))), (v4i16 VPR64:$src)>;
6572 def : Pat<(v8i8  (bitconvert (v2f32  VPR64:$src))), (v8i8 VPR64:$src)>;
6573
6574 def : Pat<(v2f32 (bitconvert (v1i64  VPR64:$src))), (v2f32 VPR64:$src)>;
6575 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
6576 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
6577 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
6578
6579 def : Pat<(v1i64 (bitconvert (v1f64  VPR64:$src))), (v1i64 VPR64:$src)>;
6580 def : Pat<(v2f32 (bitconvert (v1f64  VPR64:$src))), (v2f32 VPR64:$src)>;
6581 def : Pat<(v2i32 (bitconvert (v1f64  VPR64:$src))), (v2i32 VPR64:$src)>;
6582 def : Pat<(v4i16 (bitconvert (v1f64  VPR64:$src))), (v4i16 VPR64:$src)>;
6583 def : Pat<(v8i8 (bitconvert (v1f64  VPR64:$src))), (v8i8 VPR64:$src)>;
6584 def : Pat<(f64   (bitconvert (v1f64  VPR64:$src))), (f64 VPR64:$src)>;
6585
6586 def : Pat<(v1f64 (bitconvert (v1i64  VPR64:$src))), (v1f64 VPR64:$src)>;
6587 def : Pat<(v1f64 (bitconvert (v2f32  VPR64:$src))), (v1f64 VPR64:$src)>;
6588 def : Pat<(v1f64 (bitconvert (v2i32  VPR64:$src))), (v1f64 VPR64:$src)>;
6589 def : Pat<(v1f64 (bitconvert (v4i16  VPR64:$src))), (v1f64 VPR64:$src)>;
6590 def : Pat<(v1f64 (bitconvert (v8i8  VPR64:$src))), (v1f64 VPR64:$src)>;
6591 def : Pat<(v1f64 (bitconvert (f64  VPR64:$src))), (v1f64 VPR64:$src)>;
6592
6593 // ...and 128-bit vector bitcasts...
6594
6595 def : Pat<(v2f64 (bitconvert (v16i8  VPR128:$src))), (v2f64 VPR128:$src)>;
6596 def : Pat<(v2i64 (bitconvert (v16i8  VPR128:$src))), (v2i64 VPR128:$src)>;
6597 def : Pat<(v4f32 (bitconvert (v16i8  VPR128:$src))), (v4f32 VPR128:$src)>;
6598 def : Pat<(v4i32 (bitconvert (v16i8  VPR128:$src))), (v4i32 VPR128:$src)>;
6599 def : Pat<(v8i16 (bitconvert (v16i8  VPR128:$src))), (v8i16 VPR128:$src)>;
6600
6601 def : Pat<(v2f64 (bitconvert (v8i16  VPR128:$src))), (v2f64 VPR128:$src)>;
6602 def : Pat<(v2i64 (bitconvert (v8i16  VPR128:$src))), (v2i64 VPR128:$src)>;
6603 def : Pat<(v4i32 (bitconvert (v8i16  VPR128:$src))), (v4i32 VPR128:$src)>;
6604 def : Pat<(v4f32 (bitconvert (v8i16  VPR128:$src))), (v4f32 VPR128:$src)>;
6605 def : Pat<(v16i8 (bitconvert (v8i16  VPR128:$src))), (v16i8 VPR128:$src)>;
6606
6607 def : Pat<(v2f64 (bitconvert (v4i32  VPR128:$src))), (v2f64 VPR128:$src)>;
6608 def : Pat<(v2i64 (bitconvert (v4i32  VPR128:$src))), (v2i64 VPR128:$src)>;
6609 def : Pat<(v4f32 (bitconvert (v4i32  VPR128:$src))), (v4f32 VPR128:$src)>;
6610 def : Pat<(v8i16 (bitconvert (v4i32  VPR128:$src))), (v8i16 VPR128:$src)>;
6611 def : Pat<(v16i8 (bitconvert (v4i32  VPR128:$src))), (v16i8 VPR128:$src)>;
6612
6613 def : Pat<(v2f64 (bitconvert (v4f32  VPR128:$src))), (v2f64 VPR128:$src)>;
6614 def : Pat<(v2i64 (bitconvert (v4f32  VPR128:$src))), (v2i64 VPR128:$src)>;
6615 def : Pat<(v4i32 (bitconvert (v4f32  VPR128:$src))), (v4i32 VPR128:$src)>;
6616 def : Pat<(v8i16 (bitconvert (v4f32  VPR128:$src))), (v8i16 VPR128:$src)>;
6617 def : Pat<(v16i8 (bitconvert (v4f32  VPR128:$src))), (v16i8 VPR128:$src)>;
6618
6619 def : Pat<(v2f64 (bitconvert (v2i64  VPR128:$src))), (v2f64 VPR128:$src)>;
6620 def : Pat<(v4f32 (bitconvert (v2i64  VPR128:$src))), (v4f32 VPR128:$src)>;
6621 def : Pat<(v4i32 (bitconvert (v2i64  VPR128:$src))), (v4i32 VPR128:$src)>;
6622 def : Pat<(v8i16 (bitconvert (v2i64  VPR128:$src))), (v8i16 VPR128:$src)>;
6623 def : Pat<(v16i8 (bitconvert (v2i64  VPR128:$src))), (v16i8 VPR128:$src)>;
6624
6625 def : Pat<(v2i64 (bitconvert (v2f64  VPR128:$src))), (v2i64 VPR128:$src)>;
6626 def : Pat<(v4f32 (bitconvert (v2f64  VPR128:$src))), (v4f32 VPR128:$src)>;
6627 def : Pat<(v4i32 (bitconvert (v2f64  VPR128:$src))), (v4i32 VPR128:$src)>;
6628 def : Pat<(v8i16 (bitconvert (v2f64  VPR128:$src))), (v8i16 VPR128:$src)>;
6629 def : Pat<(v16i8 (bitconvert (v2f64  VPR128:$src))), (v16i8 VPR128:$src)>;
6630
6631 // ...and scalar bitcasts...
6632 def : Pat<(f16 (bitconvert (v1i16  FPR16:$src))), (f16 FPR16:$src)>;
6633 def : Pat<(f32 (bitconvert (v1i32  FPR32:$src))), (f32 FPR32:$src)>;
6634 def : Pat<(f64 (bitconvert (v1i64  FPR64:$src))), (f64 FPR64:$src)>;
6635 def : Pat<(f64 (bitconvert (v1f64  FPR64:$src))), (f64 FPR64:$src)>;
6636
6637 def : Pat<(i64 (bitconvert (v1i64  FPR64:$src))), (FMOVxd $src)>;
6638 def : Pat<(i64 (bitconvert (v1f64  FPR64:$src))), (FMOVxd $src)>;
6639 def : Pat<(i64 (bitconvert (v2i32  FPR64:$src))), (FMOVxd $src)>;
6640 def : Pat<(i64 (bitconvert (v2f32  FPR64:$src))), (FMOVxd $src)>;
6641 def : Pat<(i64 (bitconvert (v4i16  FPR64:$src))), (FMOVxd $src)>;
6642 def : Pat<(i64 (bitconvert (v8i8  FPR64:$src))), (FMOVxd $src)>;
6643
6644 def : Pat<(i32 (bitconvert (v1i32  FPR32:$src))), (FMOVws $src)>;
6645
6646 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
6647 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
6648 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
6649
6650 def : Pat<(f64   (bitconvert (v8i8  VPR64:$src))), (f64 VPR64:$src)>;
6651 def : Pat<(f64   (bitconvert (v4i16  VPR64:$src))), (f64 VPR64:$src)>;
6652 def : Pat<(f64   (bitconvert (v2i32  VPR64:$src))), (f64 VPR64:$src)>;
6653 def : Pat<(f64   (bitconvert (v2f32  VPR64:$src))), (f64 VPR64:$src)>;
6654 def : Pat<(f64   (bitconvert (v1i64  VPR64:$src))), (f64 VPR64:$src)>;
6655
6656 def : Pat<(f128  (bitconvert (v16i8  VPR128:$src))), (f128 VPR128:$src)>;
6657 def : Pat<(f128  (bitconvert (v8i16  VPR128:$src))), (f128 VPR128:$src)>;
6658 def : Pat<(f128  (bitconvert (v4i32  VPR128:$src))), (f128 VPR128:$src)>;
6659 def : Pat<(f128  (bitconvert (v2i64  VPR128:$src))), (f128 VPR128:$src)>;
6660 def : Pat<(f128  (bitconvert (v4f32  VPR128:$src))), (f128 VPR128:$src)>;
6661 def : Pat<(f128  (bitconvert (v2f64  VPR128:$src))), (f128 VPR128:$src)>;
6662
6663 def : Pat<(v1i16 (bitconvert (f16  FPR16:$src))), (v1i16 FPR16:$src)>;
6664 def : Pat<(v1i32 (bitconvert (f32  FPR32:$src))), (v1i32 FPR32:$src)>;
6665 def : Pat<(v1i64 (bitconvert (f64  FPR64:$src))), (v1i64 FPR64:$src)>;
6666 def : Pat<(v1f64 (bitconvert (f64  FPR64:$src))), (v1f64 FPR64:$src)>;
6667
6668 def : Pat<(v1i64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6669 def : Pat<(v1f64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6670 def : Pat<(v2i32 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6671 def : Pat<(v2f32 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6672 def : Pat<(v4i16 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6673 def : Pat<(v8i8 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6674
6675 def : Pat<(v1i32 (bitconvert (i32  GPR32:$src))), (FMOVsw $src)>;
6676
6677 def : Pat<(v8i8   (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
6678 def : Pat<(v4i16  (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
6679 def : Pat<(v2i32  (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
6680 def : Pat<(v2f32  (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
6681 def : Pat<(v1i64  (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
6682
6683 def : Pat<(v16i8  (bitconvert (f128   FPR128:$src))), (v16i8 FPR128:$src)>;
6684 def : Pat<(v8i16  (bitconvert (f128   FPR128:$src))), (v8i16 FPR128:$src)>;
6685 def : Pat<(v4i32  (bitconvert (f128   FPR128:$src))), (v4i32 FPR128:$src)>;
6686 def : Pat<(v2i64  (bitconvert (f128   FPR128:$src))), (v2i64 FPR128:$src)>;
6687 def : Pat<(v4f32  (bitconvert (f128   FPR128:$src))), (v4f32 FPR128:$src)>;
6688 def : Pat<(v2f64  (bitconvert (f128   FPR128:$src))), (v2f64 FPR128:$src)>;
6689
6690 // Unsigned immediate operands (used below by the bitwise extract instructions)
6691
6692 def neon_uimm3 : Operand<i64>,
6693                    ImmLeaf<i64, [{return Imm < 8;}]> {
6694   let ParserMatchClass = uimm3_asmoperand;
6695   let PrintMethod = "printUImmHexOperand";
6696 }
6697
6698 def neon_uimm4 : Operand<i64>,
6699                    ImmLeaf<i64, [{return Imm < 16;}]> {
6700   let ParserMatchClass = uimm4_asmoperand;
6701   let PrintMethod = "printUImmHexOperand";
6702 }
6703
6704 // Bitwise Extract
6705 class NeonI_Extract<bit q, bits<2> op2, string asmop,
6706                     string OpS, RegisterOperand OpVPR, Operand OpImm>
6707   : NeonI_BitExtract<q, op2, (outs OpVPR:$Rd),
6708                      (ins OpVPR:$Rn, OpVPR:$Rm, OpImm:$Index),
6709                      asmop # "\t$Rd." # OpS # ", $Rn." # OpS #
6710                      ", $Rm." # OpS # ", $Index",
6711                      [],
6712                      NoItinerary>,
6713     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>{
6714   bits<4> Index;
6715 }
6716
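// The byte index is placed in Inst{14-11}; the 8b form has only eight byte
// positions, so bit 14 is forced to zero and just Index{2-0} is used.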
6717 def EXTvvvi_8b : NeonI_Extract<0b0, 0b00, "ext", "8b",
6718                                VPR64, neon_uimm3> {
6719   let Inst{14-11} = {0b0, Index{2}, Index{1}, Index{0}};
6720 }
6721
6722 def EXTvvvi_16b: NeonI_Extract<0b1, 0b00, "ext", "16b",
6723                                VPR128, neon_uimm4> {
6724   let Inst{14-11} = Index;
6725 }
6726
6727 class NI_Extract<ValueType OpTy, RegisterOperand OpVPR, Instruction INST,
6728                  Operand OpImm>
6729   : Pat<(OpTy (Neon_vextract (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm),
6730                                  (i64 OpImm:$Imm))),
6731               (INST OpVPR:$Rn, OpVPR:$Rm, OpImm:$Imm)>;
6732
6733 def : NI_Extract<v8i8,  VPR64,  EXTvvvi_8b,  neon_uimm3>;
6734 def : NI_Extract<v4i16, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6735 def : NI_Extract<v2i32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6736 def : NI_Extract<v1i64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6737 def : NI_Extract<v2f32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6738 def : NI_Extract<v1f64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6739 def : NI_Extract<v16i8, VPR128, EXTvvvi_16b, neon_uimm4>;
6740 def : NI_Extract<v8i16, VPR128, EXTvvvi_16b, neon_uimm4>;
6741 def : NI_Extract<v4i32, VPR128, EXTvvvi_16b, neon_uimm4>;
6742 def : NI_Extract<v2i64, VPR128, EXTvvvi_16b, neon_uimm4>;
6743 def : NI_Extract<v4f32, VPR128, EXTvvvi_16b, neon_uimm4>;
6744 def : NI_Extract<v2f64, VPR128, EXTvvvi_16b, neon_uimm4>;
6745
6746 // Table lookup
6747 class NI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
6748              string asmop, string OpS, RegisterOperand OpVPR,
6749              RegisterOperand VecList>
6750   : NeonI_TBL<q, op2, len, op,
6751               (outs OpVPR:$Rd), (ins VecList:$Rn, OpVPR:$Rm),
6752               asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
6753               [],
6754               NoItinerary>,
6755     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
6756
6757 // The vectors in the lookup table are always 16b
6758 multiclass NI_TBL_pat<bits<2> len, bit op, string asmop, string List> {
6759   def _8b  : NI_TBL<0, 0b00, len, op, asmop, "8b", VPR64,
6760                     !cast<RegisterOperand>(List # "16B_operand")>;
6761
6762   def _16b : NI_TBL<1, 0b00, len, op, asmop, "16b", VPR128,
6763                     !cast<RegisterOperand>(List # "16B_operand")>;
6764 }
6765
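// The len field selects the number of table registers, giving the one- to
// four-register TBL forms.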
6766 defm TBL1 : NI_TBL_pat<0b00, 0b0, "tbl", "VOne">;
6767 defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
6768 defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
6769 defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
6770
6771 // Table lookup extension
6772 class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
6773              string asmop, string OpS, RegisterOperand OpVPR,
6774              RegisterOperand VecList>
6775   : NeonI_TBL<q, op2, len, op,
6776               (outs OpVPR:$Rd), (ins OpVPR:$src, VecList:$Rn, OpVPR:$Rm),
6777               asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
6778               [],
6779               NoItinerary>,
6780     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
6781   let Constraints = "$src = $Rd";
6782 }
6783
6784 // The vectors in the lookup table are always 16b
6785 multiclass NI_TBX_pat<bits<2> len, bit op, string asmop, string List> {
6786   def _8b  : NI_TBX<0, 0b00, len, op, asmop, "8b", VPR64,
6787                     !cast<RegisterOperand>(List # "16B_operand")>;
6788
6789   def _16b : NI_TBX<1, 0b00, len, op, asmop, "16b", VPR128,
6790                     !cast<RegisterOperand>(List # "16B_operand")>;
6791 }
6792
6793 defm TBX1 : NI_TBX_pat<0b00, 0b1, "tbx", "VOne">;
6794 defm TBX2 : NI_TBX_pat<0b01, 0b1, "tbx", "VPair">;
6795 defm TBX3 : NI_TBX_pat<0b10, 0b1, "tbx", "VTriple">;
6796 defm TBX4 : NI_TBX_pat<0b11, 0b1, "tbx", "VQuad">;
6797
6798 class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
6799                      RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
6800   : NeonI_copy<0b1, 0b0, 0b0011,
6801                (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
6802                asmop # "\t$Rd." # Res # "[$Imm], $Rn",
6803                [(set (ResTy VPR128:$Rd),
6804                  (ResTy (vector_insert
6805                    (ResTy VPR128:$src),
6806                    (OpTy OpGPR:$Rn),
6807                    (OpImm:$Imm))))],
6808                NoItinerary>,
6809     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
6810   bits<4> Imm;
6811   let Constraints = "$src = $Rd";
6812 }
6813
6814 // Insert element (vector, from main)
6815 def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
6816                            neon_uimm4_bare> {
6817   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6818 }
6819 def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
6820                            neon_uimm3_bare> {
6821   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6822 }
6823 def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
6824                            neon_uimm2_bare> {
6825   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6826 }
6827 def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
6828                            neon_uimm1_bare> {
6829   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
6830 }
6831
6832 def : NeonInstAlias<"mov $Rd.b[$Imm], $Rn",
6833                     (INSbw VPR128:$Rd, GPR32:$Rn, neon_uimm4_bare:$Imm), 0>;
6834 def : NeonInstAlias<"mov $Rd.h[$Imm], $Rn",
6835                     (INShw VPR128:$Rd, GPR32:$Rn, neon_uimm3_bare:$Imm), 0>;
6836 def : NeonInstAlias<"mov $Rd.s[$Imm], $Rn",
6837                     (INSsw VPR128:$Rd, GPR32:$Rn, neon_uimm2_bare:$Imm), 0>;
6838 def : NeonInstAlias<"mov $Rd.d[$Imm], $Rn",
6839                     (INSdx VPR128:$Rd, GPR64:$Rn, neon_uimm1_bare:$Imm), 0>;
6840
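// Inserting into a 64-bit vector reuses the 128-bit INS: widen the source
// with SUBREG_TO_REG, insert, then extract the low half of the result.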
6841 class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
6842                              RegisterClass OpGPR, ValueType OpTy,
6843                              Operand OpImm, Instruction INS>
6844   : Pat<(ResTy (vector_insert
6845               (ResTy VPR64:$src),
6846               (OpTy OpGPR:$Rn),
6847               (OpImm:$Imm))),
6848         (ResTy (EXTRACT_SUBREG
6849           (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
6850             OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
6851
6852 def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
6853                                           neon_uimm3_bare, INSbw>;
6854 def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
6855                                           neon_uimm2_bare, INShw>;
6856 def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
6857                                           neon_uimm1_bare, INSsw>;
6858 def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
6859                                           neon_uimm0_bare, INSdx>;
6860
6861 class NeonI_INS_element<string asmop, string Res, Operand ResImm>
6862   : NeonI_insert<0b1, 0b1,
6863                  (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn,
6864                  ResImm:$Immd, ResImm:$Immn),
6865                  asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
6866                  [],
6867                  NoItinerary>,
6868     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
6869   let Constraints = "$src = $Rd";
6870   bits<4> Immd;
6871   bits<4> Immn;
6872 }
6873
6874 // Insert element (vector, from element)
6875 def INSELb : NeonI_INS_element<"ins", "b", neon_uimm4_bare> {
6876   let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
6877   let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
6878 }
6879 def INSELh : NeonI_INS_element<"ins", "h", neon_uimm3_bare> {
6880   let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
6881   let Inst{14-11} = {Immn{2}, Immn{1}, Immn{0}, 0b0};
6882   // bit 11 is unspecified, but should be set to zero.
6883 }
6884 def INSELs : NeonI_INS_element<"ins", "s", neon_uimm2_bare> {
6885   let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
6886   let Inst{14-11} = {Immn{1}, Immn{0}, 0b0, 0b0};
6887   // bits 11-12 are unspecified, but should be set to zero.
6888 }
6889 def INSELd : NeonI_INS_element<"ins", "d", neon_uimm1_bare> {
6890   let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
6891   let Inst{14-11} = {Immn{0}, 0b0, 0b0, 0b0};
6892   // bits 11-13 are unspecified, but should be set to zero.
6893 }
6894
6895 def : NeonInstAlias<"mov $Rd.b[$Immd], $Rn.b[$Immn]",
6896                     (INSELb VPR128:$Rd, VPR128:$Rn,
6897                       neon_uimm4_bare:$Immd, neon_uimm4_bare:$Immn), 0>;
6898 def : NeonInstAlias<"mov $Rd.h[$Immd], $Rn.h[$Immn]",
6899                     (INSELh VPR128:$Rd, VPR128:$Rn,
6900                       neon_uimm3_bare:$Immd, neon_uimm3_bare:$Immn), 0>;
6901 def : NeonInstAlias<"mov $Rd.s[$Immd], $Rn.s[$Immn]",
6902                     (INSELs VPR128:$Rd, VPR128:$Rn,
6903                       neon_uimm2_bare:$Immd, neon_uimm2_bare:$Immn), 0>;
6904 def : NeonInstAlias<"mov $Rd.d[$Immd], $Rn.d[$Immn]",
6905                     (INSELd VPR128:$Rd, VPR128:$Rn,
6906                       neon_uimm1_bare:$Immd, neon_uimm1_bare:$Immn), 0>;
6907
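// Element-to-element insert helper: the four patterns cover every combination
// of 128-bit and 64-bit source and destination vectors, widening and
// narrowing through sub_64 as needed.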
6908 multiclass Neon_INS_elt_pattern<ValueType ResTy, ValueType NaTy,
6909                                 ValueType MidTy, Operand StImm, Operand NaImm,
6910                                 Instruction INS> {
6911 def : Pat<(ResTy (vector_insert
6912             (ResTy VPR128:$src),
6913             (MidTy (vector_extract
6914               (ResTy VPR128:$Rn),
6915               (StImm:$Immn))),
6916             (StImm:$Immd))),
6917           (INS (ResTy VPR128:$src), (ResTy VPR128:$Rn),
6918               StImm:$Immd, StImm:$Immn)>;
6919
6920 def : Pat <(ResTy (vector_insert
6921              (ResTy VPR128:$src),
6922              (MidTy (vector_extract
6923                (NaTy VPR64:$Rn),
6924                (NaImm:$Immn))),
6925              (StImm:$Immd))),
6926            (INS (ResTy VPR128:$src),
6927              (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
6928              StImm:$Immd, NaImm:$Immn)>;
6929
6930 def : Pat <(NaTy (vector_insert
6931              (NaTy VPR64:$src),
6932              (MidTy (vector_extract
6933                (ResTy VPR128:$Rn),
6934                (StImm:$Immn))),
6935              (NaImm:$Immd))),
6936            (NaTy (EXTRACT_SUBREG
6937              (ResTy (INS
6938                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6939                (ResTy VPR128:$Rn),
6940                NaImm:$Immd, StImm:$Immn)),
6941              sub_64))>;
6942
6943 def : Pat <(NaTy (vector_insert
6944              (NaTy VPR64:$src),
6945              (MidTy (vector_extract
6946                (NaTy VPR64:$Rn),
6947                (NaImm:$Immn))),
6948              (NaImm:$Immd))),
6949            (NaTy (EXTRACT_SUBREG
6950              (ResTy (INS
6951                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6952                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
6953                NaImm:$Immd, NaImm:$Immn)),
6954              sub_64))>;
6955 }
6956
6957 defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, neon_uimm2_bare,
6958                             neon_uimm1_bare, INSELs>;
6959 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, neon_uimm1_bare,
6960                             neon_uimm0_bare, INSELd>;
6961 defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
6962                             neon_uimm3_bare, INSELb>;
6963 defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
6964                             neon_uimm2_bare, INSELh>;
6965 defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
6966                             neon_uimm1_bare, INSELs>;
6967 defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, neon_uimm1_bare,
6968                             neon_uimm0_bare, INSELd>;
6969
6970 multiclass Neon_INS_elt_float_pattern<ValueType ResTy, ValueType NaTy,
6971                                       ValueType MidTy,
6972                                       RegisterClass OpFPR, Operand ResImm,
6973                                       SubRegIndex SubIndex, Instruction INS> {
6974 def : Pat <(ResTy (vector_insert
6975              (ResTy VPR128:$src),
6976              (MidTy OpFPR:$Rn),
6977              (ResImm:$Imm))),
6978            (INS (ResTy VPR128:$src),
6979              (ResTy (SUBREG_TO_REG (i64 0), OpFPR:$Rn, SubIndex)),
6980              ResImm:$Imm,
6981              (i64 0))>;
6982
6983 def : Pat <(NaTy (vector_insert
6984              (NaTy VPR64:$src),
6985              (MidTy OpFPR:$Rn),
6986              (ResImm:$Imm))),
6987            (NaTy (EXTRACT_SUBREG
6988              (ResTy (INS
6989                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6990                (ResTy (SUBREG_TO_REG (i64 0), (MidTy OpFPR:$Rn), SubIndex)),
6991                ResImm:$Imm,
6992                (i64 0))),
6993              sub_64))>;
6994 }
6995
6996 defm : Neon_INS_elt_float_pattern<v4f32, v2f32, f32, FPR32, neon_uimm2_bare,
6997                                   sub_32, INSELs>;
6998 defm : Neon_INS_elt_float_pattern<v2f64, v1f64, f64, FPR64, neon_uimm1_bare,
6999                                   sub_64, INSELd>;
7000
7001 class NeonI_SMOV<string asmop, string Res, bit Q,
7002                  ValueType OpTy, ValueType eleTy,
7003                  Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
7004   : NeonI_copy<Q, 0b0, 0b0101,
7005                (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
7006                asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
7007                [(set (ResTy ResGPR:$Rd),
7008                  (ResTy (sext_inreg
7009                    (ResTy (vector_extract
7010                      (OpTy VPR128:$Rn), (OpImm:$Imm))),
7011                    eleTy)))],
7012                NoItinerary>,
7013     Sched<[WriteFPALU, ReadFPALU]> {
7014   bits<4> Imm;
7015 }
7016
7017 // Signed integer move (main, from element)
7018 def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
7019                         GPR32, i32> {
7020   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7021 }
7022 def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
7023                         GPR32, i32> {
7024   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7025 }
7026 def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
7027                         GPR64, i64> {
7028   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7029 }
7030 def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
7031                         GPR64, i64> {
7032   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7033 }
7034 def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
7035                         GPR64, i64> {
7036   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7037 }
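// For example, "smov w0, v1.b[3]" sign-extends byte 3 of v1 into w0, and
// "smov x0, v1.s[2]" sign-extends word 2 into x0; the sext_inreg in the
// pattern above models that extension.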
7038
7039 multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
7040                                ValueType eleTy, Operand StImm,  Operand NaImm,
7041                                Instruction SMOVI> {
7042   def : Pat<(i64 (sext_inreg
7043               (i64 (anyext
7044                 (i32 (vector_extract
7045                   (StTy VPR128:$Rn), (StImm:$Imm))))),
7046               eleTy)),
7047             (SMOVI VPR128:$Rn, StImm:$Imm)>;
7048
7049   def : Pat<(i64 (sext
7050               (i32 (vector_extract
7051                 (StTy VPR128:$Rn), (StImm:$Imm))))),
7052             (SMOVI VPR128:$Rn, StImm:$Imm)>;
7053
7054   def : Pat<(i64 (sext_inreg
7055               (i64 (vector_extract
7056                 (NaTy VPR64:$Rn), (NaImm:$Imm))),
7057               eleTy)),
7058             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7059               NaImm:$Imm)>;
7060
7061   def : Pat<(i64 (sext_inreg
7062               (i64 (anyext
7063                 (i32 (vector_extract
7064                   (NaTy VPR64:$Rn), (NaImm:$Imm))))),
7065               eleTy)),
7066             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7067               NaImm:$Imm)>;
7068
7069   def : Pat<(i64 (sext
7070               (i32 (vector_extract
7071                 (NaTy VPR64:$Rn), (NaImm:$Imm))))),
7072             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7073               NaImm:$Imm)>;
7074 }
7075
7076 defm : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
7077                           neon_uimm3_bare, SMOVxb>;
7078 defm : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
7079                           neon_uimm2_bare, SMOVxh>;
7080 defm : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
7081                           neon_uimm1_bare, SMOVxs>;
7082
7083 class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
7084                           ValueType eleTy, Operand StImm,  Operand NaImm,
7085                           Instruction SMOVI>
7086   : Pat<(i32 (sext_inreg
7087           (i32 (vector_extract
7088             (NaTy VPR64:$Rn), (NaImm:$Imm))),
7089           eleTy)),
7090         (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7091           NaImm:$Imm)>;
7092
7093 def : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
7094                          neon_uimm3_bare, SMOVwb>;
7095 def : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
7096                          neon_uimm2_bare, SMOVwh>;
7097
7098 class NeonI_UMOV<string asmop, string Res, bit Q,
7099                  ValueType OpTy, Operand OpImm,
7100                  RegisterClass ResGPR, ValueType ResTy>
7101   : NeonI_copy<Q, 0b0, 0b0111,
7102                (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
7103                asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
7104                [(set (ResTy ResGPR:$Rd),
7105                   (ResTy (vector_extract
7106                     (OpTy VPR128:$Rn), (OpImm:$Imm))))],
7107                NoItinerary>,
7108     Sched<[WriteFPALU, ReadFPALU]> {
7109   bits<4> Imm;
7110 }
7111
7112 // Unsigned integer move (main, from element)
7113 def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
7114                          GPR32, i32> {
7115   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7116 }
7117 def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
7118                          GPR32, i32> {
7119   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7120 }
7121 def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
7122                          GPR32, i32> {
7123   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7124 }
7125 def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
7126                          GPR64, i64> {
7127   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
7128 }
7129
7130 def : NeonInstAlias<"mov $Rd, $Rn.s[$Imm]",
7131                     (UMOVws GPR32:$Rd, VPR128:$Rn, neon_uimm2_bare:$Imm), 0>;
7132 def : NeonInstAlias<"mov $Rd, $Rn.d[$Imm]",
7133                     (UMOVxd GPR64:$Rd, VPR128:$Rn, neon_uimm1_bare:$Imm), 0>;
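// For example, "umov w0, v1.h[2]" zero-extends halfword 2 of v1 into w0.
// For the .s and .d forms the element already fills the destination
// register, which is why the plain "mov" spellings are accepted as aliases.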
7134
7135 class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
7136                          Operand StImm,  Operand NaImm,
7137                          Instruction SMOVI>
7138   : Pat<(ResTy (vector_extract
7139           (NaTy VPR64:$Rn), NaImm:$Imm)),
7140         (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7141           NaImm:$Imm)>;
7142
7143 def : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
7144                         neon_uimm3_bare, UMOVwb>;
7145 def : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
7146                         neon_uimm2_bare, UMOVwh>;
7147 def : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
7148                         neon_uimm1_bare, UMOVws>;
7149
7150 def : Pat<(i32 (and
7151             (i32 (vector_extract
7152               (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
7153             255)),
7154           (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
7155
7156 def : Pat<(i32 (and
7157             (i32 (vector_extract
7158               (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
7159             65535)),
7160           (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
7161
7162 def : Pat<(i64 (zext
7163             (i32 (vector_extract
7164               (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
7165           (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
7166
7167 def : Pat<(i32 (and
7168             (i32 (vector_extract
7169               (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
7170             255)),
7171           (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7172             neon_uimm3_bare:$Imm)>;
7173
7174 def : Pat<(i32 (and
7175             (i32 (vector_extract
7176               (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
7177             65535)),
7178           (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7179             neon_uimm2_bare:$Imm)>;
7180
7181 def : Pat<(i64 (zext
7182             (i32 (vector_extract
7183               (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
7184           (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7185             neon_uimm0_bare:$Imm)>;
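// The patterns above fold an explicit mask or zero-extension of a lane
// extract (e.g. an "and" with 255 after extracting a byte) into a single
// UMOV, since UMOV already zeroes the unused high bits of the destination.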
7186
7187 // Additional copy patterns for scalar types
7188 def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
7189           (UMOVwb (v16i8
7190             (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
7191
7192 def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
7193           (UMOVwh (v8i16
7194             (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
7195
7196 def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
7197           (FMOVws FPR32:$Rn)>;
7198
7199 def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
7200           (FMOVxd FPR64:$Rn)>;
7201
7202 def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
7203           (f64 FPR64:$Rn)>;
7204
7205 def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
7206           (v1i8 (EXTRACT_SUBREG (v16i8
7207             (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
7208             sub_8))>;
7209
7210 def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
7211           (v1i16 (EXTRACT_SUBREG (v8i16
7212             (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
7213             sub_16))>;
7214
7215 def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
7216           (FMOVsw $src)>;
7217
7218 def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),
7219           (FMOVdx $src)>;
7220
7221 def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
7222           (v8i8 (EXTRACT_SUBREG (v16i8
7223             (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
7224             sub_64))>;
7225
7226 def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
7227           (v4i16 (EXTRACT_SUBREG (v8i16
7228             (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
7229             sub_64))>;
7230
7231 def : Pat<(v2i32 (scalar_to_vector GPR32:$Rn)),
7232           (v2i32 (EXTRACT_SUBREG (v4i32
7233             (INSsw (v4i32 (IMPLICIT_DEF)), $Rn, (i64 0))),
7234             sub_64))>;
7235
7236 def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
7237           (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))>;
7238
7239 def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
7240           (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))>;
7241
7242 def : Pat<(v4i32 (scalar_to_vector GPR32:$Rn)),
7243           (INSsw (v4i32 (IMPLICIT_DEF)), $Rn, (i64 0))>;
7244
7245 def : Pat<(v2i64 (scalar_to_vector GPR64:$Rn)),
7246           (INSdx (v2i64 (IMPLICIT_DEF)), $Rn, (i64 0))>;
7247
7248 def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
7249           (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)>;
7250 def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
7251           (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)>;
7252
7253 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
7254           (v1f64 FPR64:$Rn)>;
7255
7256 def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$src))),
7257           (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
7258                          (f64 FPR64:$src), sub_64)>;
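// In the scalar_to_vector patterns above, GPR sources are inserted into
// lane 0 of an IMPLICIT_DEF vector with INS (narrowed via EXTRACT_SUBREG
// when a 64-bit result is wanted), single-element integer vectors use FMOV,
// and FP scalars only need subregister operations because they already live
// in the vector register file.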
7259
7260 class NeonI_DUP_Elt<bit Q, string asmop, string rdlane,  string rnlane,
7261                     RegisterOperand ResVPR, Operand OpImm>
7262   : NeonI_copy<Q, 0b0, 0b0000, (outs ResVPR:$Rd),
7263                (ins VPR128:$Rn, OpImm:$Imm),
7264                asmop # "\t$Rd" # rdlane # ", $Rn" # rnlane # "[$Imm]",
7265                [],
7266                NoItinerary>,
7267     Sched<[WriteFPALU, ReadFPALU]> {
7268   bits<4> Imm;
7269 }
7270
7271 def DUPELT16b : NeonI_DUP_Elt<0b1, "dup", ".16b", ".b", VPR128,
7272                               neon_uimm4_bare> {
7273   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7274 }
7275
7276 def DUPELT8h : NeonI_DUP_Elt<0b1, "dup", ".8h", ".h", VPR128,
7277                               neon_uimm3_bare> {
7278   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7279 }
7280
7281 def DUPELT4s : NeonI_DUP_Elt<0b1, "dup", ".4s", ".s", VPR128,
7282                               neon_uimm2_bare> {
7283   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7284 }
7285
7286 def DUPELT2d : NeonI_DUP_Elt<0b1, "dup", ".2d", ".d", VPR128,
7287                               neon_uimm1_bare> {
7288   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
7289 }
7290
7291 def DUPELT8b : NeonI_DUP_Elt<0b0, "dup", ".8b", ".b", VPR64,
7292                               neon_uimm4_bare> {
7293   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7294 }
7295
7296 def DUPELT4h : NeonI_DUP_Elt<0b0, "dup", ".4h", ".h", VPR64,
7297                               neon_uimm3_bare> {
7298   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7299 }
7300
7301 def DUPELT2s : NeonI_DUP_Elt<0b0, "dup", ".2s", ".s", VPR64,
7302                               neon_uimm2_bare> {
7303   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7304 }
7305
7306 multiclass NeonI_DUP_Elt_pattern<Instruction DUPELT, ValueType ResTy,
7307                                        ValueType OpTy,ValueType NaTy,
7308                                        ValueType ExTy, Operand OpLImm,
7309                                        Operand OpNImm> {
7310 def  : Pat<(ResTy (Neon_vduplane (OpTy VPR128:$Rn), OpLImm:$Imm)),
7311         (ResTy (DUPELT (OpTy VPR128:$Rn), OpLImm:$Imm))>;
7312
7313 def : Pat<(ResTy (Neon_vduplane
7314             (NaTy VPR64:$Rn), OpNImm:$Imm)),
7315           (ResTy (DUPELT
7316             (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)), OpNImm:$Imm))>;
7317 }
7318 defm : NeonI_DUP_Elt_pattern<DUPELT16b, v16i8, v16i8, v8i8, v16i8,
7319                              neon_uimm4_bare, neon_uimm3_bare>;
7320 defm : NeonI_DUP_Elt_pattern<DUPELT8b, v8i8, v16i8, v8i8, v16i8,
7321                              neon_uimm4_bare, neon_uimm3_bare>;
7322 defm : NeonI_DUP_Elt_pattern<DUPELT8h, v8i16, v8i16, v4i16, v8i16,
7323                              neon_uimm3_bare, neon_uimm2_bare>;
7324 defm : NeonI_DUP_Elt_pattern<DUPELT4h, v4i16, v8i16, v4i16, v8i16,
7325                              neon_uimm3_bare, neon_uimm2_bare>;
7326 defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4i32, v4i32, v2i32, v4i32,
7327                              neon_uimm2_bare, neon_uimm1_bare>;
7328 defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2i32, v4i32, v2i32, v4i32,
7329                              neon_uimm2_bare, neon_uimm1_bare>;
7330 defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2i64, v2i64, v1i64, v2i64,
7331                              neon_uimm1_bare, neon_uimm0_bare>;
7332 defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4f32, v4f32, v2f32, v4f32,
7333                              neon_uimm2_bare, neon_uimm1_bare>;
7334 defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2f32, v4f32, v2f32, v4f32,
7335                              neon_uimm2_bare, neon_uimm1_bare>;
7336 defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2f64, v2f64, v1f64, v2f64,
7337                              neon_uimm1_bare, neon_uimm0_bare>;
7338
7339 def : Pat<(v2f32 (Neon_vdup (f32 FPR32:$Rn))),
7340           (v2f32 (DUPELT2s
7341             (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
7342             (i64 0)))>;
7343 def : Pat<(v4f32 (Neon_vdup (f32 FPR32:$Rn))),
7344           (v4f32 (DUPELT4s
7345             (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
7346             (i64 0)))>;
7347 def : Pat<(v2f64 (Neon_vdup (f64 FPR64:$Rn))),
7348           (v2f64 (DUPELT2d
7349             (SUBREG_TO_REG (i64 0), FPR64:$Rn, sub_64),
7350             (i64 0)))>;
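// For example, a v4f32 splat of an f32 value becomes "dup v0.4s, v1.s[0]"
// after promoting the FPR32 source to the enclosing 128-bit register.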
7351
7352 multiclass NeonI_DUP_pattern<Instruction DUPELT, ValueType ResTy,
7353                              ValueType OpTy, RegisterClass OpRC,
7354                              Operand OpNImm, SubRegIndex SubIndex> {
7355 def : Pat<(ResTy (Neon_vduplane (OpTy OpRC:$Rn), OpNImm:$Imm)),
7356           (ResTy (DUPELT
7357             (SUBREG_TO_REG (i64 0), OpRC:$Rn, SubIndex), OpNImm:$Imm))>;
7358 }
7359
7360 defm : NeonI_DUP_pattern<DUPELT4h, v4i16, v1i16, FPR16, neon_uimm2_bare,sub_16>;
7361 defm : NeonI_DUP_pattern<DUPELT4s, v4i32, v1i32, FPR32, neon_uimm2_bare,sub_32>;
7362 defm : NeonI_DUP_pattern<DUPELT8b, v8i8, v1i8, FPR8, neon_uimm3_bare, sub_8>;
7363 defm : NeonI_DUP_pattern<DUPELT8h, v8i16, v1i16, FPR16, neon_uimm3_bare,sub_16>;
7364 defm : NeonI_DUP_pattern<DUPELT16b, v16i8, v1i8, FPR8, neon_uimm4_bare, sub_8>;
7365
7366 class NeonI_DUP<bit Q, string asmop, string rdlane,
7367                 RegisterOperand ResVPR, ValueType ResTy,
7368                 RegisterClass OpGPR, ValueType OpTy>
7369   : NeonI_copy<Q, 0b0, 0b0001, (outs ResVPR:$Rd), (ins OpGPR:$Rn),
7370                asmop # "\t$Rd" # rdlane # ", $Rn",
7371                [(set (ResTy ResVPR:$Rd),
7372                  (ResTy (Neon_vdup (OpTy OpGPR:$Rn))))],
7373                NoItinerary>,
7374     Sched<[WriteFPALU, ReadFPALU]>;
7375
7376 def DUP16b : NeonI_DUP<0b1, "dup", ".16b", VPR128, v16i8, GPR32, i32> {
7377   let Inst{20-16} = 0b00001;
7378   // bits 17-20 are unspecified, but should be set to zero.
7379 }
7380
7381 def DUP8h : NeonI_DUP<0b1, "dup", ".8h", VPR128, v8i16, GPR32, i32> {
7382   let Inst{20-16} = 0b00010;
7383   // bits 18-20 are unspecified, but should be set to zero.
7384 }
7385
7386 def DUP4s : NeonI_DUP<0b1, "dup", ".4s", VPR128, v4i32, GPR32, i32> {
7387   let Inst{20-16} = 0b00100;
7388   // bits 19-20 are unspecified, but should be set to zero.
7389 }
7390
7391 def DUP2d : NeonI_DUP<0b1, "dup", ".2d", VPR128, v2i64, GPR64, i64> {
7392   let Inst{20-16} = 0b01000;
7393   // bit 20 is unspecified, but should be set to zero.
7394 }
7395
7396 def DUP8b : NeonI_DUP<0b0, "dup", ".8b", VPR64, v8i8, GPR32, i32> {
7397   let Inst{20-16} = 0b00001;
7398   // bits 17-20 are unspecified, but should be set to zero.
7399 }
7400
7401 def DUP4h : NeonI_DUP<0b0, "dup", ".4h", VPR64, v4i16, GPR32, i32> {
7402   let Inst{20-16} = 0b00010;
7403   // bits 18-20 are unspecified, but should be set to zero.
7404 }
7405
7406 def DUP2s : NeonI_DUP<0b0, "dup", ".2s", VPR64, v2i32, GPR32, i32> {
7407   let Inst{20-16} = 0b00100;
7408   // bits 19-20 are unspecified, but should be set to zero.
7409 }
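// For example, "dup v0.8h, w1" replicates the low 16 bits of w1 into every
// halfword lane of v0; the Neon_vdup patterns above select these forms.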
7410
7411 // patterns for CONCAT_VECTORS
7412 multiclass Concat_Vector_Pattern<ValueType ResTy, ValueType OpTy> {
7413 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), undef)),
7414           (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)>;
7415 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))),
7416           (INSELd
7417             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7418             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rm, sub_64)),
7419             (i64 1),
7420             (i64 0))>;
7421 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rn))),
7422           (DUPELT2d
7423             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7424             (i64 0))>;
7425 }
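// Concatenating two 64-bit vectors widens the first operand with
// SUBREG_TO_REG and inserts d[0] of the second operand into lane 1 with
// INSELd; concatenation with undef is just the subregister-to-register copy,
// and concatenation of a register with itself degenerates to DUPELT2d.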
7426
7427 defm : Concat_Vector_Pattern<v16i8, v8i8>;
7428 defm : Concat_Vector_Pattern<v8i16, v4i16>;
7429 defm : Concat_Vector_Pattern<v4i32, v2i32>;
7430 defm : Concat_Vector_Pattern<v2i64, v1i64>;
7431 defm : Concat_Vector_Pattern<v4f32, v2f32>;
7432 defm : Concat_Vector_Pattern<v2f64, v1f64>;
7433
7434 def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), undef)),
7435           (v2i32 (SUBREG_TO_REG (i64 0), $Rn, sub_32))>;
7436 def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
7437           (EXTRACT_SUBREG 
7438             (v4i32 (INSELs
7439               (v4i32 (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)),
7440               (v4i32 (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
7441               (i64 1),
7442               (i64 0))),
7443             sub_64)>;
7444 def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rn))),
7445           (DUPELT2s (v4i32 (SUBREG_TO_REG (i64 0), $Rn, sub_32)), 0)>;
7446
7447 // patterns for EXTRACT_SUBVECTOR
7448 def : Pat<(v8i8 (extract_subvector (v16i8 VPR128:$Rn), (i64 0))),
7449           (v8i8 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7450 def : Pat<(v4i16 (extract_subvector (v8i16 VPR128:$Rn), (i64 0))),
7451           (v4i16 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7452 def : Pat<(v2i32 (extract_subvector (v4i32 VPR128:$Rn), (i64 0))),
7453           (v2i32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7454 def : Pat<(v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 0))),
7455           (v1i64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7456 def : Pat<(v2f32 (extract_subvector (v4f32 VPR128:$Rn), (i64 0))),
7457           (v2f32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7458 def : Pat<(v1f64 (extract_subvector (v2f64 VPR128:$Rn), (i64 0))),
7459           (v1f64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
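// Extracting the low half of a 128-bit vector needs no instruction at all;
// it is simply an EXTRACT_SUBREG of the sub_64 subregister.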
7460
7461 // The following definitions are for instruction class (3V Elem)
7462
7463 // Variant 1
7464
7465 class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
7466              string asmop, string ResS, string OpS, string EleOpS,
7467              Operand OpImm, RegisterOperand ResVPR,
7468              RegisterOperand OpVPR, RegisterOperand EleOpVPR>
7469   : NeonI_2VElem<q, u, size, opcode,
7470                  (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
7471                                          EleOpVPR:$Re, OpImm:$Index),
7472                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
7473                  ", $Re." # EleOpS # "[$Index]",
7474                  [],
7475                  NoItinerary>,
7476     Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
7477   bits<3> Index;
7478   bits<5> Re;
7479
7480   let Constraints = "$src = $Rd";
7481 }
7482
7483 multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop> {
7484   // vector register class for element is always 128-bit to cover the max index
7485   def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
7486                      neon_uimm2_bare, VPR64, VPR64, VPR128> {
7487     let Inst{11} = {Index{1}};
7488     let Inst{21} = {Index{0}};
7489     let Inst{20-16} = Re;
7490   }
7491
7492   def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
7493                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
7494     let Inst{11} = {Index{1}};
7495     let Inst{21} = {Index{0}};
7496     let Inst{20-16} = Re;
7497   }
7498
7499   // Index operations on 16-bit (H) elements are restricted to using v0-v15.
7500   def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
7501                      neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
7502     let Inst{11} = {Index{2}};
7503     let Inst{21} = {Index{1}};
7504     let Inst{20} = {Index{0}};
7505     let Inst{19-16} = Re{3-0};
7506   }
7507
7508   def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
7509                      neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
7510     let Inst{11} = {Index{2}};
7511     let Inst{21} = {Index{1}};
7512     let Inst{20} = {Index{0}};
7513     let Inst{19-16} = Re{3-0};
7514   }
7515 }
7516
7517 defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
7518 defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
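// For example, "mla v0.4s, v1.4s, v2.s[1]" multiplies each element of v1 by
// lane 1 of v2 and accumulates into v0; "mls" is the subtracting form.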
7519
7520 // Pattern for lane in 128-bit vector
7521 class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
7522                    RegisterOperand ResVPR, RegisterOperand OpVPR,
7523                    RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
7524                    ValueType EleOpTy>
7525   : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
7526           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
7527         (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
7528
7529 // Pattern for lane in 64-bit vector
7530 class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
7531                   RegisterOperand ResVPR, RegisterOperand OpVPR,
7532                   RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
7533                   ValueType EleOpTy>
7534   : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
7535           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
7536         (INST ResVPR:$src, OpVPR:$Rn,
7537           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
7538
7539 multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
7540 {
7541   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
7542                      op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32>;
7543
7544   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
7545                      op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32>;
7546
7547   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
7548                      op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
7549
7550   def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
7551                      op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
7552
7553   // Index can only be half of the max value for lane in 64-bit vector
7554
7555   def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
7556                     op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32>;
7557
7558   def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
7559                     op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
7560 }
7561
7562 defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
7563 defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
7564
7565 class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
7566                  string asmop, string ResS, string OpS, string EleOpS,
7567                  Operand OpImm, RegisterOperand ResVPR,
7568                  RegisterOperand OpVPR, RegisterOperand EleOpVPR>
7569   : NeonI_2VElem<q, u, size, opcode,
7570                  (outs ResVPR:$Rd), (ins OpVPR:$Rn,
7571                                          EleOpVPR:$Re, OpImm:$Index),
7572                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
7573                  ", $Re." # EleOpS # "[$Index]",
7574                  [],
7575                  NoItinerary>,
7576     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
7577   bits<3> Index;
7578   bits<5> Re;
7579 }
7580
7581 multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop> {
7582   // vector register class for element is always 128-bit to cover the max index
7583   def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
7584                          neon_uimm2_bare, VPR64, VPR64, VPR128> {
7585     let Inst{11} = {Index{1}};
7586     let Inst{21} = {Index{0}};
7587     let Inst{20-16} = Re;
7588   }
7589
7590   def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
7591                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
7592     let Inst{11} = {Index{1}};
7593     let Inst{21} = {Index{0}};
7594     let Inst{20-16} = Re;
7595   }
7596
7597   // Index operations on 16-bit (H) elements are restricted to using v0-v15.
7598   def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
7599                          neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
7600     let Inst{11} = {Index{2}};
7601     let Inst{21} = {Index{1}};
7602     let Inst{20} = {Index{0}};
7603     let Inst{19-16} = Re{3-0};
7604   }
7605
7606   def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
7607                          neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
7608     let Inst{11} = {Index{2}};
7609     let Inst{21} = {Index{1}};
7610     let Inst{20} = {Index{0}};
7611     let Inst{19-16} = Re{3-0};
7612   }
7613 }
7614
7615 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
7616 defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
7617 defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
7618 defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
7619 }
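// For example, "mul v0.8h, v1.8h, v2.h[5]"; as noted above, the element
// register for the .h forms is restricted to v0-v15.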
7620
7621 // Pattern for lane in 128-bit vector
7622 class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
7623                        RegisterOperand OpVPR, RegisterOperand EleOpVPR,
7624                        ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
7625   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
7626           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
7627         (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
7628
7629 // Pattern for lane in 64-bit vector
7630 class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
7631                       RegisterOperand OpVPR, RegisterOperand EleOpVPR,
7632                       ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
7633   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
7634           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
7635         (INST OpVPR:$Rn,
7636           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
7637
7638 multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op> {
7639   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
7640                          op, VPR64, VPR128, v2i32, v2i32, v4i32>;
7641
7642   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
7643                          op, VPR128, VPR128, v4i32, v4i32, v4i32>;
7644
7645   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
7646                          op, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
7647
7648   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
7649                          op, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
7650
7651   // Index can only be half of the max value for lane in 64-bit vector
7652
7653   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
7654                         op, VPR64, VPR64, v2i32, v2i32, v2i32>;
7655
7656   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
7657                         op, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
7658 }
7659
7660 defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
7661 defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
7662 defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
7663
7664 // Variant 2
7665
7666 multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop> {
7667   // vector register class for element is always 128-bit to cover the max index
7668   def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
7669                          neon_uimm2_bare, VPR64, VPR64, VPR128> {
7670     let Inst{11} = {Index{1}};
7671     let Inst{21} = {Index{0}};
7672     let Inst{20-16} = Re;
7673   }
7674
7675   def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
7676                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
7677     let Inst{11} = {Index{1}};
7678     let Inst{21} = {Index{0}};
7679     let Inst{20-16} = Re;
7680   }
7681
7682   // _1d2d doesn't exist!
7683
7684   def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
7685                          neon_uimm1_bare, VPR128, VPR128, VPR128> {
7686     let Inst{11} = {Index{0}};
7687     let Inst{21} = 0b0;
7688     let Inst{20-16} = Re;
7689   }
7690 }
7691
7692 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
7693 defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
7694 defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
7695 }
7696
7697 class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
7698                          RegisterOperand OpVPR, RegisterOperand EleOpVPR,
7699                          ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
7700                          SDPatternOperator coreop>
7701   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
7702           (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
7703         (INST OpVPR:$Rn,
7704           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
7705
7706 multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op> {
7707   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
7708                          op, VPR64, VPR128, v2f32, v2f32, v4f32>;
7709
7710   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
7711                          op, VPR128, VPR128, v4f32, v4f32, v4f32>;
7712
7713   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
7714                          op, VPR128, VPR128, v2f64, v2f64, v2f64>;
7715
7716   // Index can only be half of the max value for lane in 64-bit vector
7717
7718   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
7719                         op, VPR64, VPR64, v2f32, v2f32, v2f32>;
7720
7721   def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
7722                            op, VPR128, VPR64, v2f64, v2f64, v1f64,
7723                            BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
7724 }
7725
7726 defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
7727 defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
7728
7729 def : Pat<(v2f32 (fmul (v2f32 (Neon_vdup (f32 FPR32:$Re))),
7730                        (v2f32 VPR64:$Rn))),
7731           (FMULve_2s4s VPR64:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
7732
7733 def : Pat<(v4f32 (fmul (v4f32 (Neon_vdup (f32 FPR32:$Re))),
7734                        (v4f32 VPR128:$Rn))),
7735           (FMULve_4s4s VPR128:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
7736
7737 def : Pat<(v2f64 (fmul (v2f64 (Neon_vdup (f64 FPR64:$Re))),
7738                        (v2f64 VPR128:$Rn))),
7739           (FMULve_2d2d VPR128:$Rn, (SUBREG_TO_REG (i64 0), $Re, sub_64), 0)>;
7740
7741 // The following patterns use fma.
7742 // -ffp-contract=fast generates fma
7743
7744 multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop> {
7745   // vector register class for element is always 128-bit to cover the max index
7746   def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
7747                      neon_uimm2_bare, VPR64, VPR64, VPR128> {
7748     let Inst{11} = {Index{1}};
7749     let Inst{21} = {Index{0}};
7750     let Inst{20-16} = Re;
7751   }
7752
7753   def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
7754                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
7755     let Inst{11} = {Index{1}};
7756     let Inst{21} = {Index{0}};
7757     let Inst{20-16} = Re;
7758   }
7759
7760   // _1d2d doesn't exist!
7761
7762   def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
7763                      neon_uimm1_bare, VPR128, VPR128, VPR128> {
7764     let Inst{11} = {Index{0}};
7765     let Inst{21} = 0b0;
7766     let Inst{20-16} = Re;
7767   }
7768 }
7769
7770 defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
7771 defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
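// For example, "fmla v0.4s, v1.4s, v2.s[3]" fuses a multiply by lane 3 of v2
// with an accumulate into v0; the fma-based patterns below select these
// instructions.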
7772
7773 // Pattern for lane in 128-bit vector
7774 class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
7775                        RegisterOperand ResVPR, RegisterOperand OpVPR,
7776                        ValueType ResTy, ValueType OpTy,
7777                        SDPatternOperator coreop>
7778   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
7779                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
7780         (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
7781
7782 // Pattern for lane 0
7783 class NI_2VEfma_lane0<Instruction INST, SDPatternOperator op,
7784                       RegisterOperand ResVPR, ValueType ResTy>
7785   : Pat<(ResTy (op (ResTy ResVPR:$Rn),
7786                    (ResTy (Neon_vdup (f32 FPR32:$Re))),
7787                    (ResTy ResVPR:$src))),
7788         (INST ResVPR:$src, ResVPR:$Rn,
7789               (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
7790
7791 // Pattern for lane in 64-bit vector
7792 class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
7793                       RegisterOperand ResVPR, RegisterOperand OpVPR,
7794                       ValueType ResTy, ValueType OpTy,
7795                       SDPatternOperator coreop>
7796   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
7797                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
7798         (INST ResVPR:$src, ResVPR:$Rn,
7799           (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
7800
7801 // Pattern for lane in 64-bit vector
7802 class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
7803                            SDPatternOperator op,
7804                            RegisterOperand ResVPR, RegisterOperand OpVPR,
7805                            ValueType ResTy, ValueType OpTy,
7806                            SDPatternOperator coreop>
7807   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
7808                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
7809         (INST ResVPR:$src, ResVPR:$Rn,
7810           (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
7811
7812
7813 multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op> {
7814   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
7815                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
7816                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
7817
7818   def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_2s4s"),
7819                         op, VPR64, v2f32>;
7820
7821   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
7822                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
7823                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
7824
7825   def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_4s4s"),
7826                         op, VPR128, v4f32>;
7827
7828   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
7829                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
7830                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
7831
7832   // Index can only be half of the max value for lane in 64-bit vector
7833
7834   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
7835                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
7836                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
7837
7838   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
7839                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
7840                              BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
7841 }
7842
7843 defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
7844
7845 // Pattern for lane 0
7846 class NI_2VEfms_lane0<Instruction INST, SDPatternOperator op,
7847                       RegisterOperand ResVPR, ValueType ResTy>
7848   : Pat<(ResTy (op (ResTy (fneg ResVPR:$Rn)),
7849                    (ResTy (Neon_vdup (f32 FPR32:$Re))),
7850                    (ResTy ResVPR:$src))),
7851         (INST ResVPR:$src, ResVPR:$Rn,
7852               (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
7853
7854 multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
7855 {
7856   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
7857                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
7858                          BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
7859
7860   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
7861                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
7862                          BinOpFrag<(Neon_vduplane
7863                                      (fneg node:$LHS), node:$RHS)>>;
7864
7865   def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_2s4s"),
7866                         op, VPR64, v2f32>;
7867
7868   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
7869                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
7870                          BinOpFrag<(fneg (Neon_vduplane
7871                                      node:$LHS, node:$RHS))>>;
7872
7873   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
7874                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
7875                          BinOpFrag<(Neon_vduplane
7876                                      (fneg node:$LHS), node:$RHS)>>;
7877
7878   def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_4s4s"),
7879                         op, VPR128, v4f32>;
7880
7881   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
7882                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
7883                          BinOpFrag<(fneg (Neon_vduplane
7884                                      node:$LHS, node:$RHS))>>;
7885
7886   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
7887                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
7888                          BinOpFrag<(Neon_vduplane
7889                                      (fneg node:$LHS), node:$RHS)>>;
7890
7891   // Index can only be half of the max value for lane in 64-bit vector
7892
7893   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
7894                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
7895                         BinOpFrag<(fneg (Neon_vduplane
7896                                     node:$LHS, node:$RHS))>>;
7897
7898   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
7899                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
7900                         BinOpFrag<(Neon_vduplane
7901                                     (fneg node:$LHS), node:$RHS)>>;
7902
7903   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
7904                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
7905                         BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
7906
7907   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
7908                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
7909                         BinOpFrag<(Neon_vduplane (fneg node:$LHS), node:$RHS)>>;
7910
7911   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
7912                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
7913                              BinOpFrag<(fneg (Neon_combine_2d
7914                                          node:$LHS, node:$RHS))>>;
7915
7916   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
7917                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
7918                              BinOpFrag<(Neon_combine_2d
7919                                          (fneg node:$LHS), (fneg node:$RHS))>>;
7920 }
7921
7922 defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
7923
7924 // Variant 3: Long type
7925 // E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
7926 //      SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
7927
7928 multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop> {
7929   // vector register class for element is always 128-bit to cover the max index
7930   def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
7931                      neon_uimm2_bare, VPR128, VPR64, VPR128> {
7932     let Inst{11} = {Index{1}};
7933     let Inst{21} = {Index{0}};
7934     let Inst{20-16} = Re;
7935   }
7936
7937   def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
7938                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
7939     let Inst{11} = {Index{1}};
7940     let Inst{21} = {Index{0}};
7941     let Inst{20-16} = Re;
7942   }
7943
7944   // Index operations on 16-bit (H) elements are restricted to using v0-v15.
7945   def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
7946                      neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
7947     let Inst{11} = {Index{2}};
7948     let Inst{21} = {Index{1}};
7949     let Inst{20} = {Index{0}};
7950     let Inst{19-16} = Re{3-0};
7951   }
7952
7953   def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
7954                      neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
7955     let Inst{11} = {Index{2}};
7956     let Inst{21} = {Index{1}};
7957     let Inst{20} = {Index{0}};
7958     let Inst{19-16} = Re{3-0};
7959   }
7960 }
7961
7962 defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
7963 defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
7964 defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
7965 defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
7966 defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
7967 defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
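// For example, "smlal v0.4s, v1.4h, v2.h[2]" widens and accumulates from the
// low half of the source, while "smlal2 v0.4s, v1.8h, v2.h[2]" uses the high
// half (modelled with Neon_High8H/Neon_High4S in the patterns below).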
7968
7969 multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop> {
7970   // vector register class for element is always 128-bit to cover the max index
7971   def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
7972                          neon_uimm2_bare, VPR128, VPR64, VPR128> {
7973     let Inst{11} = {Index{1}};
7974     let Inst{21} = {Index{0}};
7975     let Inst{20-16} = Re;
7976   }
7977
7978   def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
7979                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
7980     let Inst{11} = {Index{1}};
7981     let Inst{21} = {Index{0}};
7982     let Inst{20-16} = Re;
7983   }
7984
7985   // Index operations on 16-bit (H) elements are restricted to using v0-v15.
7986   def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
7987                          neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
7988     let Inst{11} = {Index{2}};
7989     let Inst{21} = {Index{1}};
7990     let Inst{20} = {Index{0}};
7991     let Inst{19-16} = Re{3-0};
7992   }
7993
7994   def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
7995                          neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
7996     let Inst{11} = {Index{2}};
7997     let Inst{21} = {Index{1}};
7998     let Inst{20} = {Index{0}};
7999     let Inst{19-16} = Re{3-0};
8000   }
8001 }
8002
8003 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
8004 defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
8005 defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
8006 defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
8007 }
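// For example, "smull2 v0.2d, v1.4s, v2.s[1]" multiplies the high two words
// of v1 by lane 1 of v2, producing doubleword results.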
8008
8009 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
8010           (FMOVdd $src)>;
8011
8012 // Pattern for lane in 128-bit vector
8013 class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
8014                      RegisterOperand EleOpVPR, ValueType ResTy,
8015                      ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
8016                      SDPatternOperator hiop>
8017   : Pat<(ResTy (op (ResTy VPR128:$src),
8018           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8019           (HalfOpTy (Neon_vduplane
8020                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
8021         (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
8022
8023 // Pattern for lane in 64-bit vector
8024 class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
8025                     RegisterOperand EleOpVPR, ValueType ResTy,
8026                     ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
8027                     SDPatternOperator hiop>
8028   : Pat<(ResTy (op (ResTy VPR128:$src),
8029           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8030           (HalfOpTy (Neon_vduplane
8031                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
8032         (INST VPR128:$src, VPR128:$Rn,
8033           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
8034
8035 class NI_2VEL2_lane0<Instruction INST, SDPatternOperator op,
8036                      ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
8037                      SDPatternOperator hiop, Instruction DupInst>
8038   : Pat<(ResTy (op (ResTy VPR128:$src),
8039           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8040           (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
8041         (INST VPR128:$src, VPR128:$Rn, (DupInst $Re), 0)>;
8042
8043 multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op> {
8044   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
8045                      op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
8046
8047   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
8048                      op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32>;
8049
8050   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
8051                        op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
8052
8053   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
8054                        op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
8055
8056   def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
8057                        op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
8058
8059   def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
8060                        op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
8061
8062   // Index can only be half of the max value for lane in 64-bit vector
8063
8064   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
8065                     op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
8066
8067   def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
8068                     op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32>;
8069
8070   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
8071                       op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
8072
8073   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
8074                       op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
8075 }
8076
8077 defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
8078 defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
8079 defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
8080 defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
8081
8082 // Pattern for lane in 128-bit vector
8083 class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
8084                          RegisterOperand EleOpVPR, ValueType ResTy,
8085                          ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
8086                          SDPatternOperator hiop>
8087   : Pat<(ResTy (op
8088           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8089           (HalfOpTy (Neon_vduplane
8090                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
8091         (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
8092
8093 // Pattern for lane in 64-bit vector
8094 class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
8095                         RegisterOperand EleOpVPR, ValueType ResTy,
8096                         ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
8097                         SDPatternOperator hiop>
8098   : Pat<(ResTy (op
8099           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8100           (HalfOpTy (Neon_vduplane
8101                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
8102         (INST VPR128:$Rn,
8103           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
8104
8105 // Pattern for fixed lane 0
8106 class NI_2VEL2_mul_lane0<Instruction INST, SDPatternOperator op,
8107                          ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
8108                          SDPatternOperator hiop, Instruction DupInst>
8109   : Pat<(ResTy (op
8110           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8111           (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
8112         (INST VPR128:$Rn, (DupInst $Re), 0)>;
8113
8114 multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op> {
8115   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
8116                          op, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
8117
8118   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
8119                          op, VPR64, VPR128, v2i64, v2i32, v4i32>;
8120
8121   def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
8122                          op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
8123
8124   def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
8125                            op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
8126
8127   def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_4s8h"),
8128                            op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
8129
8130   def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_2d4s"),
8131                            op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
8132
8133   // Index can only be half of the max value for lane in 64-bit vector
8134
8135   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
8136                         op, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
8137
8138   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
8139                         op, VPR64, VPR64, v2i64, v2i32, v2i32>;
8140
8141   def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
8142                           op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
8143
8144   def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
8145                           op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
8146 }
8147
8148 defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
8149 defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
8150 defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
8151
8152 multiclass NI_qdma<SDPatternOperator op> {
8153   def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
8154                     (op node:$Ra,
8155                       (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
8156
8157   def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
8158                     (op node:$Ra,
8159                       (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
8160 }
8161
8162 defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
8163 defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
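// Neon_qdmlal_4s, for instance, matches a saturating add of a vqdmull
// result, letting the NI_2VEL_v3_qdma_pat patterns below fold the pair into
// a single indexed sqdmlal.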
8164
8165 multiclass NI_2VEL_v3_qdma_pat<string subop, string op> {
8166   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
8167                      !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
8168                      v4i32, v4i16, v8i16>;
8169
8170   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
8171                      !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
8172                      v2i64, v2i32, v4i32>;
8173
8174   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
8175                        !cast<PatFrag>(op # "_4s"), VPR128Lo,
8176                        v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
8177
8178   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
8179                        !cast<PatFrag>(op # "_2d"), VPR128,
8180                        v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
8181
8182   def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
8183                        !cast<PatFrag>(op # "_4s"),
8184                        v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
8185
8186   def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
8187                        !cast<PatFrag>(op # "_2d"),
8188                        v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
8189
8190   // A 64-bit by-element operand only allows half the maximum lane index.
8191
8192   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
8193                     !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
8194                     v4i32, v4i16, v4i16>;
8195
8196   def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
8197                     !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
8198                     v2i64, v2i32, v2i32>;
8199
8200   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
8201                       !cast<PatFrag>(op # "_4s"), VPR64Lo,
8202                       v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
8203
8204   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
8205                       !cast<PatFrag>(op # "_2d"), VPR64,
8206                       v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
8207 }
8208
8209 defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
8210 defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
8211
8212 // End of implementation for instruction class (3V Elem)
8213
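// REV64, REV32 and REV16 reverse the order of the elements within each 64-bit,
// 32-bit or 16-bit container respectively; for example, rev64 v0.4s, v1.4s
// swaps the two 32-bit elements inside each 64-bit half of the vector.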
8214 class NeonI_REV<string asmop, string Res, bits<2> size, bit Q, bit U,
8215                 bits<5> opcode, RegisterOperand ResVPR, ValueType ResTy,
8216                 SDPatternOperator Neon_Rev>
8217   : NeonI_2VMisc<Q, U, size, opcode,
8218                (outs ResVPR:$Rd), (ins ResVPR:$Rn),
8219                asmop # "\t$Rd." # Res # ", $Rn." # Res,
8220                [(set (ResTy ResVPR:$Rd),
8221                   (ResTy (Neon_Rev (ResTy ResVPR:$Rn))))],
8222                NoItinerary>,
8223     Sched<[WriteFPALU, ReadFPALU]>;
8224
8225 def REV64_16b : NeonI_REV<"rev64", "16b", 0b00, 0b1, 0b0, 0b00000, VPR128,
8226                           v16i8, Neon_rev64>;
8227 def REV64_8h : NeonI_REV<"rev64", "8h", 0b01, 0b1, 0b0, 0b00000, VPR128,
8228                          v8i16, Neon_rev64>;
8229 def REV64_4s : NeonI_REV<"rev64", "4s", 0b10, 0b1, 0b0, 0b00000, VPR128,
8230                          v4i32, Neon_rev64>;
8231 def REV64_8b : NeonI_REV<"rev64", "8b", 0b00, 0b0, 0b0, 0b00000, VPR64,
8232                          v8i8, Neon_rev64>;
8233 def REV64_4h : NeonI_REV<"rev64", "4h", 0b01, 0b0, 0b0, 0b00000, VPR64,
8234                          v4i16, Neon_rev64>;
8235 def REV64_2s : NeonI_REV<"rev64", "2s", 0b10, 0b0, 0b0, 0b00000, VPR64,
8236                          v2i32, Neon_rev64>;
8237
8238 def : Pat<(v4f32 (Neon_rev64 (v4f32 VPR128:$Rn))), (REV64_4s VPR128:$Rn)>;
8239 def : Pat<(v2f32 (Neon_rev64 (v2f32 VPR64:$Rn))), (REV64_2s VPR64:$Rn)>;
8240
8241 def REV32_16b : NeonI_REV<"rev32", "16b", 0b00, 0b1, 0b1, 0b00000, VPR128,
8242                           v16i8, Neon_rev32>;
8243 def REV32_8h : NeonI_REV<"rev32", "8h", 0b01, 0b1, 0b1, 0b00000, VPR128,
8244                           v8i16, Neon_rev32>;
8245 def REV32_8b : NeonI_REV<"rev32", "8b", 0b00, 0b0, 0b1, 0b00000, VPR64,
8246                          v8i8, Neon_rev32>;
8247 def REV32_4h : NeonI_REV<"rev32", "4h", 0b01, 0b0, 0b1, 0b00000, VPR64,
8248                          v4i16, Neon_rev32>;
8249
8250 def REV16_16b : NeonI_REV<"rev16", "16b", 0b00, 0b1, 0b0, 0b00001, VPR128,
8251                           v16i8, Neon_rev16>;
8252 def REV16_8b : NeonI_REV<"rev16", "8b", 0b00, 0b0, 0b0, 0b00001, VPR64,
8253                          v8i8, Neon_rev16>;
8254
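// SADDLP/UADDLP add each adjacent pair of elements and widen the sums to the
// next element size, e.g. saddlp v0.4h, v1.8b produces four halfword sums of
// byte pairs.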
8255 multiclass NeonI_PairwiseAdd<string asmop, bit U, bits<5> opcode,
8256                              SDPatternOperator Neon_Padd> {
8257   def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
8258                            (outs VPR128:$Rd), (ins VPR128:$Rn),
8259                            asmop # "\t$Rd.8h, $Rn.16b",
8260                            [(set (v8i16 VPR128:$Rd),
8261                               (v8i16 (Neon_Padd (v16i8 VPR128:$Rn))))],
8262                            NoItinerary>,
8263               Sched<[WriteFPALU, ReadFPALU]>;
8264
8265   def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
8266                           (outs VPR64:$Rd), (ins VPR64:$Rn),
8267                           asmop # "\t$Rd.4h, $Rn.8b",
8268                           [(set (v4i16 VPR64:$Rd),
8269                              (v4i16 (Neon_Padd (v8i8 VPR64:$Rn))))],
8270                           NoItinerary>,
8271              Sched<[WriteFPALU, ReadFPALU]>;
8272
8273   def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8274                            (outs VPR128:$Rd), (ins VPR128:$Rn),
8275                            asmop # "\t$Rd.4s, $Rn.8h",
8276                            [(set (v4i32 VPR128:$Rd),
8277                               (v4i32 (Neon_Padd (v8i16 VPR128:$Rn))))],
8278                            NoItinerary>,
8279              Sched<[WriteFPALU, ReadFPALU]>;
8280
8281   def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8282                           (outs VPR64:$Rd), (ins VPR64:$Rn),
8283                           asmop # "\t$Rd.2s, $Rn.4h",
8284                           [(set (v2i32 VPR64:$Rd),
8285                              (v2i32 (Neon_Padd (v4i16 VPR64:$Rn))))],
8286                           NoItinerary>,
8287              Sched<[WriteFPALU, ReadFPALU]>;
8288
8289   def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
8290                            (outs VPR128:$Rd), (ins VPR128:$Rn),
8291                            asmop # "\t$Rd.2d, $Rn.4s",
8292                            [(set (v2i64 VPR128:$Rd),
8293                               (v2i64 (Neon_Padd (v4i32 VPR128:$Rn))))],
8294                            NoItinerary>,
8295              Sched<[WriteFPALU, ReadFPALU]>;
8296
8297   def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
8298                           (outs VPR64:$Rd), (ins VPR64:$Rn),
8299                           asmop # "\t$Rd.1d, $Rn.2s",
8300                           [(set (v1i64 VPR64:$Rd),
8301                              (v1i64 (Neon_Padd (v2i32 VPR64:$Rn))))],
8302                           NoItinerary>,
8303              Sched<[WriteFPALU, ReadFPALU]>;
8304 }
8305
8306 defm SADDLP : NeonI_PairwiseAdd<"saddlp", 0b0, 0b00010,
8307                                 int_arm_neon_vpaddls>;
8308 defm UADDLP : NeonI_PairwiseAdd<"uaddlp", 0b1, 0b00010,
8309                                 int_arm_neon_vpaddlu>;
8310
8311 def : Pat<(v1i64 (int_aarch64_neon_saddlv (v2i32 VPR64:$Rn))),
8312           (SADDLP2s1d $Rn)>;
8313 def : Pat<(v1i64 (int_aarch64_neon_uaddlv (v2i32 VPR64:$Rn))),
8314           (UADDLP2s1d $Rn)>;
8315
8316 multiclass NeonI_PairwiseAddAcc<string asmop, bit U, bits<5> opcode,
8317                              SDPatternOperator Neon_Padd> {
8318   let Constraints = "$src = $Rd" in {
8319     def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
8320                              (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8321                              asmop # "\t$Rd.8h, $Rn.16b",
8322                              [(set (v8i16 VPR128:$Rd),
8323                                 (v8i16 (Neon_Padd
8324                                   (v8i16 VPR128:$src), (v16i8 VPR128:$Rn))))],
8325                              NoItinerary>,
8326                 Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8327
8328     def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
8329                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8330                             asmop # "\t$Rd.4h, $Rn.8b",
8331                             [(set (v4i16 VPR64:$Rd),
8332                                (v4i16 (Neon_Padd
8333                                  (v4i16 VPR64:$src), (v8i8 VPR64:$Rn))))],
8334                             NoItinerary>,
8335                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8336
8337     def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8338                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8339                             asmop # "\t$Rd.4s, $Rn.8h",
8340                             [(set (v4i32 VPR128:$Rd),
8341                                (v4i32 (Neon_Padd
8342                                  (v4i32 VPR128:$src), (v8i16 VPR128:$Rn))))],
8343                             NoItinerary>,
8344                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8345
8346     def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8347                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8348                             asmop # "\t$Rd.2s, $Rn.4h",
8349                             [(set (v2i32 VPR64:$Rd),
8350                                (v2i32 (Neon_Padd
8351                                  (v2i32 VPR64:$src), (v4i16 VPR64:$Rn))))],
8352                             NoItinerary>,
8353                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8354
8355     def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
8356                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8357                             asmop # "\t$Rd.2d, $Rn.4s",
8358                             [(set (v2i64 VPR128:$Rd),
8359                                (v2i64 (Neon_Padd
8360                                  (v2i64 VPR128:$src), (v4i32 VPR128:$Rn))))],
8361                             NoItinerary>,
8362                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8363
8364     def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
8365                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8366                             asmop # "\t$Rd.1d, $Rn.2s",
8367                             [(set (v1i64 VPR64:$Rd),
8368                                (v1i64 (Neon_Padd
8369                                  (v1i64 VPR64:$src), (v2i32 VPR64:$Rn))))],
8370                             NoItinerary>,
8371                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8372   }
8373 }
8374
8375 defm SADALP : NeonI_PairwiseAddAcc<"sadalp", 0b0, 0b00110,
8376                                    int_arm_neon_vpadals>;
8377 defm UADALP : NeonI_PairwiseAddAcc<"uadalp", 0b1, 0b00110,
8378                                    int_arm_neon_vpadalu>;
8379
8380 multiclass NeonI_2VMisc_BHSDsize_1Arg<string asmop, bit U, bits<5> opcode> {
8381   def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
8382                          (outs VPR128:$Rd), (ins VPR128:$Rn),
8383                          asmop # "\t$Rd.16b, $Rn.16b",
8384                          [], NoItinerary>,
8385             Sched<[WriteFPALU, ReadFPALU]>;
8386
8387   def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
8388                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8389                         asmop # "\t$Rd.8h, $Rn.8h",
8390                         [], NoItinerary>,
8391            Sched<[WriteFPALU, ReadFPALU]>;
8392
8393   def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
8394                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8395                         asmop # "\t$Rd.4s, $Rn.4s",
8396                         [], NoItinerary>,
8397            Sched<[WriteFPALU, ReadFPALU]>;
8398
8399   def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
8400                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8401                         asmop # "\t$Rd.2d, $Rn.2d",
8402                         [], NoItinerary>,
8403            Sched<[WriteFPALU, ReadFPALU]>;
8404
8405   def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
8406                          (outs VPR64:$Rd), (ins VPR64:$Rn),
8407                          asmop # "\t$Rd.8b, $Rn.8b",
8408                          [], NoItinerary>,
8409            Sched<[WriteFPALU, ReadFPALU]>;
8410
8411   def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
8412                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8413                         asmop # "\t$Rd.4h, $Rn.4h",
8414                         [], NoItinerary>,
8415            Sched<[WriteFPALU, ReadFPALU]>;
8416
8417   def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
8418                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8419                         asmop # "\t$Rd.2s, $Rn.2s",
8420                         [], NoItinerary>,
8421            Sched<[WriteFPALU, ReadFPALU]>;
8422 }
8423
8424 defm SQABS : NeonI_2VMisc_BHSDsize_1Arg<"sqabs", 0b0, 0b00111>;
8425 defm SQNEG : NeonI_2VMisc_BHSDsize_1Arg<"sqneg", 0b1, 0b00111>;
8426 defm ABS : NeonI_2VMisc_BHSDsize_1Arg<"abs", 0b0, 0b01011>;
8427 defm NEG : NeonI_2VMisc_BHSDsize_1Arg<"neg", 0b1, 0b01011>;
8428
8429 multiclass NeonI_2VMisc_BHSD_1Arg_Pattern<string Prefix,
8430                                           SDPatternOperator Neon_Op> {
8431   def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$Rn))),
8432             (v16i8 (!cast<Instruction>(Prefix # 16b) (v16i8 VPR128:$Rn)))>;
8433
8434   def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$Rn))),
8435             (v8i16 (!cast<Instruction>(Prefix # 8h) (v8i16 VPR128:$Rn)))>;
8436
8437   def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$Rn))),
8438             (v4i32 (!cast<Instruction>(Prefix # 4s) (v4i32 VPR128:$Rn)))>;
8439
8440   def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$Rn))),
8441             (v2i64 (!cast<Instruction>(Prefix # 2d) (v2i64 VPR128:$Rn)))>;
8442
8443   def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$Rn))),
8444             (v8i8 (!cast<Instruction>(Prefix # 8b) (v8i8 VPR64:$Rn)))>;
8445
8446   def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$Rn))),
8447             (v4i16 (!cast<Instruction>(Prefix # 4h) (v4i16 VPR64:$Rn)))>;
8448
8449   def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$Rn))),
8450             (v2i32 (!cast<Instruction>(Prefix # 2s) (v2i32 VPR64:$Rn)))>;
8451 }
8452
8453 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQABS", int_arm_neon_vqabs>;
8454 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQNEG", int_arm_neon_vqneg>;
8455 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"ABS", int_arm_neon_vabs>;
8456
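// A subtraction from the all-zeros vector (possibly bitcast) selects to NEG.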
8457 def : Pat<(v16i8 (sub
8458             (v16i8 Neon_AllZero),
8459             (v16i8 VPR128:$Rn))),
8460           (v16i8 (NEG16b (v16i8 VPR128:$Rn)))>;
8461 def : Pat<(v8i8 (sub
8462             (v8i8 Neon_AllZero),
8463             (v8i8 VPR64:$Rn))),
8464           (v8i8 (NEG8b (v8i8 VPR64:$Rn)))>;
8465 def : Pat<(v8i16 (sub
8466             (v8i16 (bitconvert (v16i8 Neon_AllZero))),
8467             (v8i16 VPR128:$Rn))),
8468           (v8i16 (NEG8h (v8i16 VPR128:$Rn)))>;
8469 def : Pat<(v4i16 (sub
8470             (v4i16 (bitconvert (v8i8 Neon_AllZero))),
8471             (v4i16 VPR64:$Rn))),
8472           (v4i16 (NEG4h (v4i16 VPR64:$Rn)))>;
8473 def : Pat<(v4i32 (sub
8474             (v4i32 (bitconvert (v16i8 Neon_AllZero))),
8475             (v4i32 VPR128:$Rn))),
8476           (v4i32 (NEG4s (v4i32 VPR128:$Rn)))>;
8477 def : Pat<(v2i32 (sub
8478             (v2i32 (bitconvert (v8i8 Neon_AllZero))),
8479             (v2i32 VPR64:$Rn))),
8480           (v2i32 (NEG2s (v2i32 VPR64:$Rn)))>;
8481 def : Pat<(v2i64 (sub
8482             (v2i64 (bitconvert (v16i8 Neon_AllZero))),
8483             (v2i64 VPR128:$Rn))),
8484           (v2i64 (NEG2d (v2i64 VPR128:$Rn)))>;
8485
8486 multiclass NeonI_2VMisc_BHSDsize_2Args<string asmop, bit U, bits<5> opcode> {
8487   let Constraints = "$src = $Rd" in {
8488     def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
8489                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8490                            asmop # "\t$Rd.16b, $Rn.16b",
8491                            [], NoItinerary>,
8492               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8493
8494     def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
8495                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8496                           asmop # "\t$Rd.8h, $Rn.8h",
8497                           [], NoItinerary>,
8498              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8499
8500     def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
8501                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8502                           asmop # "\t$Rd.4s, $Rn.4s",
8503                           [], NoItinerary>,
8504              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8505
8506     def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
8507                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8508                           asmop # "\t$Rd.2d, $Rn.2d",
8509                           [], NoItinerary>,
8510              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8511
8512     def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
8513                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8514                           asmop # "\t$Rd.8b, $Rn.8b",
8515                           [], NoItinerary>,
8516              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8517
8518     def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
8519                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8520                           asmop # "\t$Rd.4h, $Rn.4h",
8521                           [], NoItinerary>,
8522              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8523
8524     def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
8525                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8526                           asmop # "\t$Rd.2s, $Rn.2s",
8527                           [], NoItinerary>,
8528              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8529   }
8530 }
8531
8532 defm SUQADD : NeonI_2VMisc_BHSDsize_2Args<"suqadd", 0b0, 0b00011>;
8533 defm USQADD : NeonI_2VMisc_BHSDsize_2Args<"usqadd", 0b1, 0b00011>;
8534
8535 multiclass NeonI_2VMisc_BHSD_2Args_Pattern<string Prefix,
8536                                            SDPatternOperator Neon_Op> {
8537   def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$src), (v16i8 VPR128:$Rn))),
8538             (v16i8 (!cast<Instruction>(Prefix # 16b)
8539               (v16i8 VPR128:$src), (v16i8 VPR128:$Rn)))>;
8540
8541   def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$src), (v8i16 VPR128:$Rn))),
8542             (v8i16 (!cast<Instruction>(Prefix # 8h)
8543               (v8i16 VPR128:$src), (v8i16 VPR128:$Rn)))>;
8544
8545   def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$src), (v4i32 VPR128:$Rn))),
8546             (v4i32 (!cast<Instruction>(Prefix # 4s)
8547               (v4i32 VPR128:$src), (v4i32 VPR128:$Rn)))>;
8548
8549   def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$src), (v2i64 VPR128:$Rn))),
8550             (v2i64 (!cast<Instruction>(Prefix # 2d)
8551               (v2i64 VPR128:$src), (v2i64 VPR128:$Rn)))>;
8552
8553   def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$src), (v8i8 VPR64:$Rn))),
8554             (v8i8 (!cast<Instruction>(Prefix # 8b)
8555               (v8i8 VPR64:$src), (v8i8 VPR64:$Rn)))>;
8556
8557   def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$src), (v4i16 VPR64:$Rn))),
8558             (v4i16 (!cast<Instruction>(Prefix # 4h)
8559               (v4i16 VPR64:$src), (v4i16 VPR64:$Rn)))>;
8560
8561   def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$src), (v2i32 VPR64:$Rn))),
8562             (v2i32 (!cast<Instruction>(Prefix # 2s)
8563               (v2i32 VPR64:$src), (v2i32 VPR64:$Rn)))>;
8564 }
8565
8566 defm : NeonI_2VMisc_BHSD_2Args_Pattern<"SUQADD", int_aarch64_neon_suqadd>;
8567 defm : NeonI_2VMisc_BHSD_2Args_Pattern<"USQADD", int_aarch64_neon_usqadd>;
8568
8569 multiclass NeonI_2VMisc_BHSsizes<string asmop, bit U,
8570                           SDPatternOperator Neon_Op> {
8571   def 16b : NeonI_2VMisc<0b1, U, 0b00, 0b00100,
8572                          (outs VPR128:$Rd), (ins VPR128:$Rn),
8573                          asmop # "\t$Rd.16b, $Rn.16b",
8574                          [(set (v16i8 VPR128:$Rd),
8575                             (v16i8 (Neon_Op (v16i8 VPR128:$Rn))))],
8576                          NoItinerary>,
8577             Sched<[WriteFPALU, ReadFPALU]>;
8578
8579   def 8h : NeonI_2VMisc<0b1, U, 0b01, 0b00100,
8580                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8581                         asmop # "\t$Rd.8h, $Rn.8h",
8582                         [(set (v8i16 VPR128:$Rd),
8583                            (v8i16 (Neon_Op (v8i16 VPR128:$Rn))))],
8584                         NoItinerary>,
8585            Sched<[WriteFPALU, ReadFPALU]>;
8586
8587   def 4s : NeonI_2VMisc<0b1, U, 0b10, 0b00100,
8588                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8589                         asmop # "\t$Rd.4s, $Rn.4s",
8590                         [(set (v4i32 VPR128:$Rd),
8591                            (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
8592                         NoItinerary>,
8593            Sched<[WriteFPALU, ReadFPALU]>;
8594
8595   def 8b : NeonI_2VMisc<0b0, U, 0b00, 0b00100,
8596                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8597                         asmop # "\t$Rd.8b, $Rn.8b",
8598                         [(set (v8i8 VPR64:$Rd),
8599                            (v8i8 (Neon_Op (v8i8 VPR64:$Rn))))],
8600                         NoItinerary>,
8601            Sched<[WriteFPALU, ReadFPALU]>;
8602
8603   def 4h : NeonI_2VMisc<0b0, U, 0b01, 0b00100,
8604                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8605                         asmop # "\t$Rd.4h, $Rn.4h",
8606                         [(set (v4i16 VPR64:$Rd),
8607                            (v4i16 (Neon_Op (v4i16 VPR64:$Rn))))],
8608                         NoItinerary>,
8609            Sched<[WriteFPALU, ReadFPALU]>;
8610
8611   def 2s : NeonI_2VMisc<0b0, U, 0b10, 0b00100,
8612                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8613                         asmop # "\t$Rd.2s, $Rn.2s",
8614                         [(set (v2i32 VPR64:$Rd),
8615                            (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
8616                         NoItinerary>,
8617            Sched<[WriteFPALU, ReadFPALU]>;
8618 }
8619
8620 defm CLS : NeonI_2VMisc_BHSsizes<"cls", 0b0, int_arm_neon_vcls>;
8621 defm CLZ : NeonI_2VMisc_BHSsizes<"clz", 0b1, ctlz>;
8622
8623 multiclass NeonI_2VMisc_Bsize<string asmop, bit U, bits<2> size,
8624                               bits<5> Opcode> {
8625   def 16b : NeonI_2VMisc<0b1, U, size, Opcode,
8626                          (outs VPR128:$Rd), (ins VPR128:$Rn),
8627                          asmop # "\t$Rd.16b, $Rn.16b",
8628                          [], NoItinerary>,
8629             Sched<[WriteFPALU, ReadFPALU]>;
8630
8631   def 8b : NeonI_2VMisc<0b0, U, size, Opcode,
8632                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8633                         asmop # "\t$Rd.8b, $Rn.8b",
8634                         [], NoItinerary>,
8635            Sched<[WriteFPALU, ReadFPALU]>;
8636 }
8637
8638 defm CNT : NeonI_2VMisc_Bsize<"cnt", 0b0, 0b00, 0b00101>;
8639 defm NOT : NeonI_2VMisc_Bsize<"not", 0b1, 0b00, 0b00101>;
8640 defm RBIT : NeonI_2VMisc_Bsize<"rbit", 0b1, 0b01, 0b00101>;
8641
8642 def : NeonInstAlias<"mvn $Rd.16b, $Rn.16b",
8643                     (NOT16b VPR128:$Rd, VPR128:$Rn), 0>;
8644 def : NeonInstAlias<"mvn $Rd.8b, $Rn.8b",
8645                     (NOT8b VPR64:$Rd, VPR64:$Rn), 0>;
8646
8647 def : Pat<(v16i8 (ctpop (v16i8 VPR128:$Rn))),
8648           (v16i8 (CNT16b (v16i8 VPR128:$Rn)))>;
8649 def : Pat<(v8i8 (ctpop (v8i8 VPR64:$Rn))),
8650           (v8i8 (CNT8b (v8i8 VPR64:$Rn)))>;
8651
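// An XOR with the all-ones vector (possibly bitcast) selects to NOT (mvn).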
8652 def : Pat<(v16i8 (xor
8653             (v16i8 VPR128:$Rn),
8654             (v16i8 Neon_AllOne))),
8655           (v16i8 (NOT16b (v16i8 VPR128:$Rn)))>;
8656 def : Pat<(v8i8 (xor
8657             (v8i8 VPR64:$Rn),
8658             (v8i8 Neon_AllOne))),
8659           (v8i8 (NOT8b (v8i8 VPR64:$Rn)))>;
8660 def : Pat<(v8i16 (xor
8661             (v8i16 VPR128:$Rn),
8662             (v8i16 (bitconvert (v16i8 Neon_AllOne))))),
8663           (NOT16b VPR128:$Rn)>;
8664 def : Pat<(v4i16 (xor
8665             (v4i16 VPR64:$Rn),
8666             (v4i16 (bitconvert (v8i8 Neon_AllOne))))),
8667           (NOT8b VPR64:$Rn)>;
8668 def : Pat<(v4i32 (xor
8669             (v4i32 VPR128:$Rn),
8670             (v4i32 (bitconvert (v16i8 Neon_AllOne))))),
8671           (NOT16b VPR128:$Rn)>;
8672 def : Pat<(v2i32 (xor
8673             (v2i32 VPR64:$Rn),
8674             (v2i32 (bitconvert (v8i8 Neon_AllOne))))),
8675           (NOT8b VPR64:$Rn)>;
8676 def : Pat<(v2i64 (xor
8677             (v2i64 VPR128:$Rn),
8678             (v2i64 (bitconvert (v16i8 Neon_AllOne))))),
8679           (NOT16b VPR128:$Rn)>;
8680
8681 def : Pat<(v16i8 (int_aarch64_neon_rbit (v16i8 VPR128:$Rn))),
8682           (v16i8 (RBIT16b (v16i8 VPR128:$Rn)))>;
8683 def : Pat<(v8i8 (int_aarch64_neon_rbit (v8i8 VPR64:$Rn))),
8684           (v8i8 (RBIT8b (v8i8 VPR64:$Rn)))>;
8685
8686 multiclass NeonI_2VMisc_SDsizes<string asmop, bit U, bits<5> opcode,
8687                                 SDPatternOperator Neon_Op> {
8688   def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
8689                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8690                         asmop # "\t$Rd.4s, $Rn.4s",
8691                         [(set (v4f32 VPR128:$Rd),
8692                            (v4f32 (Neon_Op (v4f32 VPR128:$Rn))))],
8693                         NoItinerary>,
8694            Sched<[WriteFPALU, ReadFPALU]>;
8695
8696   def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
8697                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8698                         asmop # "\t$Rd.2d, $Rn.2d",
8699                         [(set (v2f64 VPR128:$Rd),
8700                            (v2f64 (Neon_Op (v2f64 VPR128:$Rn))))],
8701                         NoItinerary>,
8702            Sched<[WriteFPALU, ReadFPALU]>;
8703
8704   def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
8705                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8706                         asmop # "\t$Rd.2s, $Rn.2s",
8707                         [(set (v2f32 VPR64:$Rd),
8708                            (v2f32 (Neon_Op (v2f32 VPR64:$Rn))))],
8709                         NoItinerary>,
8710            Sched<[WriteFPALU, ReadFPALU]>;
8711 }
8712
8713 defm FABS : NeonI_2VMisc_SDsizes<"fabs", 0b0, 0b01111, fabs>;
8714 defm FNEG : NeonI_2VMisc_SDsizes<"fneg", 0b1, 0b01111, fneg>;
8715
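// Narrowing instructions: the plain XTN/SQXTUN/SQXTN/UQXTN forms write the low
// 64-bit half of the destination, while the "2" forms (note the tied $src
// operand) insert the narrowed result into the high half and keep the low half.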
8716 multiclass NeonI_2VMisc_HSD_Narrow<string asmop, bit U, bits<5> opcode> {
8717   def 8h8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
8718                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8719                           asmop # "\t$Rd.8b, $Rn.8h",
8720                           [], NoItinerary>,
8721              Sched<[WriteFPALU, ReadFPALU]>;
8722
8723   def 4s4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
8724                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8725                           asmop # "\t$Rd.4h, $Rn.4s",
8726                           [], NoItinerary>,
8727              Sched<[WriteFPALU, ReadFPALU]>;
8728
8729   def 2d2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
8730                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8731                           asmop # "\t$Rd.2s, $Rn.2d",
8732                           [], NoItinerary>,
8733              Sched<[WriteFPALU, ReadFPALU]>;
8734
8735   let Constraints = "$Rd = $src" in {
8736     def 8h16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
8737                              (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8738                              asmop # "2\t$Rd.16b, $Rn.8h",
8739                              [], NoItinerary>,
8740                 Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8741
8742     def 4s8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
8743                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8744                             asmop # "2\t$Rd.8h, $Rn.4s",
8745                             [], NoItinerary>,
8746                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8747
8748     def 2d4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
8749                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8750                             asmop # "2\t$Rd.4s, $Rn.2d",
8751                             [], NoItinerary>,
8752                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8753   }
8754 }
8755
8756 defm XTN : NeonI_2VMisc_HSD_Narrow<"xtn", 0b0, 0b10010>;
8757 defm SQXTUN : NeonI_2VMisc_HSD_Narrow<"sqxtun", 0b1, 0b10010>;
8758 defm SQXTN : NeonI_2VMisc_HSD_Narrow<"sqxtn", 0b0, 0b10100>;
8759 defm UQXTN : NeonI_2VMisc_HSD_Narrow<"uqxtn", 0b1, 0b10100>;
8760
8761 multiclass NeonI_2VMisc_Narrow_Patterns<string Prefix,
8762                                         SDPatternOperator Neon_Op> {
8763   def : Pat<(v8i8 (Neon_Op (v8i16 VPR128:$Rn))),
8764             (v8i8 (!cast<Instruction>(Prefix # 8h8b) (v8i16 VPR128:$Rn)))>;
8765
8766   def : Pat<(v4i16 (Neon_Op (v4i32 VPR128:$Rn))),
8767             (v4i16 (!cast<Instruction>(Prefix # 4s4h) (v4i32 VPR128:$Rn)))>;
8768
8769   def : Pat<(v2i32 (Neon_Op (v2i64 VPR128:$Rn))),
8770             (v2i32 (!cast<Instruction>(Prefix # 2d2s) (v2i64 VPR128:$Rn)))>;
8771
8772   def : Pat<(v16i8 (concat_vectors
8773               (v8i8 VPR64:$src),
8774               (v8i8 (Neon_Op (v8i16 VPR128:$Rn))))),
8775             (!cast<Instruction>(Prefix # 8h16b)
8776               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
8777               VPR128:$Rn)>;
8778
8779   def : Pat<(v8i16 (concat_vectors
8780               (v4i16 VPR64:$src),
8781               (v4i16 (Neon_Op (v4i32 VPR128:$Rn))))),
8782             (!cast<Instruction>(Prefix # 4s8h)
8783               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
8784               VPR128:$Rn)>;
8785
8786   def : Pat<(v4i32 (concat_vectors
8787               (v2i32 VPR64:$src),
8788               (v2i32 (Neon_Op (v2i64 VPR128:$Rn))))),
8789             (!cast<Instruction>(Prefix # 2d4s)
8790               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
8791               VPR128:$Rn)>;
8792 }
8793
8794 defm : NeonI_2VMisc_Narrow_Patterns<"XTN", trunc>;
8795 defm : NeonI_2VMisc_Narrow_Patterns<"SQXTUN", int_arm_neon_vqmovnsu>;
8796 defm : NeonI_2VMisc_Narrow_Patterns<"SQXTN", int_arm_neon_vqmovns>;
8797 defm : NeonI_2VMisc_Narrow_Patterns<"UQXTN", int_arm_neon_vqmovnu>;
8798
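// SHLL/SHLL2 shift each element left by the source element width while
// doubling the element size, which is why the immediate operand is restricted
// to uimm_exact8/16/32 for byte, halfword and word sources respectively.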
8799 multiclass NeonI_2VMisc_SHIFT<string asmop, bit U, bits<5> opcode> {
8800   let DecoderMethod = "DecodeSHLLInstruction" in {
8801     def 8b8h : NeonI_2VMisc<0b0, U, 0b00, opcode,
8802                             (outs VPR128:$Rd),
8803                             (ins VPR64:$Rn, uimm_exact8:$Imm),
8804                             asmop # "\t$Rd.8h, $Rn.8b, $Imm",
8805                             [], NoItinerary>,
8806                Sched<[WriteFPALU, ReadFPALU]>;
8807
8808     def 4h4s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8809                             (outs VPR128:$Rd),
8810                             (ins VPR64:$Rn, uimm_exact16:$Imm),
8811                             asmop # "\t$Rd.4s, $Rn.4h, $Imm",
8812                             [], NoItinerary>,
8813                Sched<[WriteFPALU, ReadFPALU]>;
8814
8815     def 2s2d : NeonI_2VMisc<0b0, U, 0b10, opcode,
8816                             (outs VPR128:$Rd),
8817                             (ins VPR64:$Rn, uimm_exact32:$Imm),
8818                             asmop # "\t$Rd.2d, $Rn.2s, $Imm",
8819                             [], NoItinerary>,
8820                Sched<[WriteFPALU, ReadFPALU]>;
8821
8822     def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
8823                             (outs VPR128:$Rd),
8824                             (ins VPR128:$Rn, uimm_exact8:$Imm),
8825                             asmop # "2\t$Rd.8h, $Rn.16b, $Imm",
8826                             [], NoItinerary>,
8827                 Sched<[WriteFPALU, ReadFPALU]>;
8828
8829     def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8830                             (outs VPR128:$Rd),
8831                             (ins VPR128:$Rn, uimm_exact16:$Imm),
8832                             asmop # "2\t$Rd.4s, $Rn.8h, $Imm",
8833                             [], NoItinerary>,
8834                Sched<[WriteFPALU, ReadFPALU]>;
8835
8836     def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
8837                             (outs VPR128:$Rd),
8838                             (ins VPR128:$Rn, uimm_exact32:$Imm),
8839                             asmop # "2\t$Rd.2d, $Rn.4s, $Imm",
8840                             [], NoItinerary>,
8841                Sched<[WriteFPALU, ReadFPALU]>;
8842   }
8843 }
8844
8845 defm SHLL : NeonI_2VMisc_SHIFT<"shll", 0b1, 0b10011>;
8846
8847 class NeonI_SHLL_Patterns<ValueType OpTy, ValueType DesTy,
8848                           SDPatternOperator ExtOp, Operand Neon_Imm,
8849                           string suffix>
8850   : Pat<(DesTy (shl
8851           (DesTy (ExtOp (OpTy VPR64:$Rn))),
8852             (DesTy (Neon_vdup
8853               (i32 Neon_Imm:$Imm))))),
8854         (!cast<Instruction>("SHLL" # suffix) VPR64:$Rn, Neon_Imm:$Imm)>;
8855
8856 class NeonI_SHLL_High_Patterns<ValueType OpTy, ValueType DesTy,
8857                                SDPatternOperator ExtOp, Operand Neon_Imm,
8858                                string suffix, PatFrag GetHigh>
8859   : Pat<(DesTy (shl
8860           (DesTy (ExtOp
8861             (OpTy (GetHigh VPR128:$Rn)))),
8862               (DesTy (Neon_vdup
8863                 (i32 Neon_Imm:$Imm))))),
8864         (!cast<Instruction>("SHLL" # suffix) VPR128:$Rn, Neon_Imm:$Imm)>;
8865
8866 def : NeonI_SHLL_Patterns<v8i8, v8i16, zext, uimm_exact8, "8b8h">;
8867 def : NeonI_SHLL_Patterns<v8i8, v8i16, sext, uimm_exact8, "8b8h">;
8868 def : NeonI_SHLL_Patterns<v4i16, v4i32, zext, uimm_exact16, "4h4s">;
8869 def : NeonI_SHLL_Patterns<v4i16, v4i32, sext, uimm_exact16, "4h4s">;
8870 def : NeonI_SHLL_Patterns<v2i32, v2i64, zext, uimm_exact32, "2s2d">;
8871 def : NeonI_SHLL_Patterns<v2i32, v2i64, sext, uimm_exact32, "2s2d">;
8872 def : NeonI_SHLL_High_Patterns<v8i8, v8i16, zext, uimm_exact8, "16b8h",
8873                                Neon_High16B>;
8874 def : NeonI_SHLL_High_Patterns<v8i8, v8i16, sext, uimm_exact8, "16b8h",
8875                                Neon_High16B>;
8876 def : NeonI_SHLL_High_Patterns<v4i16, v4i32, zext, uimm_exact16, "8h4s",
8877                                Neon_High8H>;
8878 def : NeonI_SHLL_High_Patterns<v4i16, v4i32, sext, uimm_exact16, "8h4s",
8879                                Neon_High8H>;
8880 def : NeonI_SHLL_High_Patterns<v2i32, v2i64, zext, uimm_exact32, "4s2d",
8881                                Neon_High4S>;
8882 def : NeonI_SHLL_High_Patterns<v2i32, v2i64, sext, uimm_exact32, "4s2d",
8883                                Neon_High4S>;
8884
8885 multiclass NeonI_2VMisc_SD_Narrow<string asmop, bit U, bits<5> opcode> {
8886   def 4s4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
8887                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8888                           asmop # "\t$Rd.4h, $Rn.4s",
8889                           [], NoItinerary>,
8890              Sched<[WriteFPALU, ReadFPALU]>;
8891
8892   def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8893                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8894                           asmop # "\t$Rd.2s, $Rn.2d",
8895                           [], NoItinerary>,
8896              Sched<[WriteFPALU, ReadFPALU]>;
8897
8898   let Constraints = "$src = $Rd" in {
8899     def 4s8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
8900                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8901                             asmop # "2\t$Rd.8h, $Rn.4s",
8902                             [], NoItinerary>,
8903                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8904
8905     def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8906                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8907                             asmop # "2\t$Rd.4s, $Rn.2d",
8908                             [], NoItinerary>,
8909                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8910   }
8911 }
8912
8913 defm FCVTN : NeonI_2VMisc_SD_Narrow<"fcvtn", 0b0, 0b10110>;
8914
8915 multiclass NeonI_2VMisc_Narrow_Pattern<string prefix,
8916                                        SDPatternOperator f32_to_f16_Op,
8917                                        SDPatternOperator f64_to_f32_Op> {
8918
8919   def : Pat<(v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))),
8920               (!cast<Instruction>(prefix # "4s4h") (v4f32 VPR128:$Rn))>;
8921
8922   def : Pat<(v8i16 (concat_vectors
8923                 (v4i16 VPR64:$src),
8924                 (v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))))),
8925                   (!cast<Instruction>(prefix # "4s8h")
8926                     (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
8927                     (v4f32 VPR128:$Rn))>;
8928
8929   def : Pat<(v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))),
8930             (!cast<Instruction>(prefix # "2d2s") (v2f64 VPR128:$Rn))>;
8931
8932   def : Pat<(v4f32 (concat_vectors
8933               (v2f32 VPR64:$src),
8934               (v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))))),
8935                 (!cast<Instruction>(prefix # "2d4s")
8936                   (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
8937                   (v2f64 VPR128:$Rn))>;
8938 }
8939
8940 defm : NeonI_2VMisc_Narrow_Pattern<"FCVTN", int_arm_neon_vcvtfp2hf, fround>;
8941
8942 multiclass NeonI_2VMisc_D_Narrow<string asmop, string prefix, bit U,
8943                                  bits<5> opcode> {
8944   def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8945                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8946                           asmop # "\t$Rd.2s, $Rn.2d",
8947                           [], NoItinerary>,
8948              Sched<[WriteFPALU, ReadFPALU]>;
8949
8950   def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8951                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8952                           asmop # "2\t$Rd.4s, $Rn.2d",
8953                           [], NoItinerary>,
8954              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
8955     let Constraints = "$src = $Rd";
8956   }
8957
8958   def : Pat<(v2f32 (int_aarch64_neon_vcvtxn (v2f64 VPR128:$Rn))),
8959             (!cast<Instruction>(prefix # "2d2s") VPR128:$Rn)>;
8960
8961   def : Pat<(v4f32 (concat_vectors
8962               (v2f32 VPR64:$src),
8963               (v2f32 (int_aarch64_neon_vcvtxn (v2f64 VPR128:$Rn))))),
8964             (!cast<Instruction>(prefix # "2d4s")
8965                (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
8966                VPR128:$Rn)>;
8967 }
8968
8969 defm FCVTXN : NeonI_2VMisc_D_Narrow<"fcvtxn", "FCVTXN", 0b1, 0b10110>;
8970
8971 def Neon_High4Float : PatFrag<(ops node:$in),
8972                               (extract_subvector (v4f32 node:$in), (iPTR 2))>;
8973
8974 multiclass NeonI_2VMisc_HS_Extend<string asmop, bit U, bits<5> opcode> {
8975   def 4h4s : NeonI_2VMisc<0b0, U, 0b00, opcode,
8976                           (outs VPR128:$Rd), (ins VPR64:$Rn),
8977                           asmop # "\t$Rd.4s, $Rn.4h",
8978                           [], NoItinerary>,
8979              Sched<[WriteFPALU, ReadFPALU]>;
8980
8981   def 2s2d : NeonI_2VMisc<0b0, U, 0b01, opcode,
8982                           (outs VPR128:$Rd), (ins VPR64:$Rn),
8983                           asmop # "\t$Rd.2d, $Rn.2s",
8984                           [], NoItinerary>,
8985              Sched<[WriteFPALU, ReadFPALU]>;
8986
8987   def 8h4s : NeonI_2VMisc<0b1, U, 0b00, opcode,
8988                           (outs VPR128:$Rd), (ins VPR128:$Rn),
8989                           asmop # "2\t$Rd.4s, $Rn.8h",
8990                           [], NoItinerary>,
8991              Sched<[WriteFPALU, ReadFPALU]>;
8992
8993   def 4s2d : NeonI_2VMisc<0b1, U, 0b01, opcode,
8994                           (outs VPR128:$Rd), (ins VPR128:$Rn),
8995                           asmop # "2\t$Rd.2d, $Rn.4s",
8996                           [], NoItinerary>,
8997              Sched<[WriteFPALU, ReadFPALU]>;
8998 }
8999
9000 defm FCVTL : NeonI_2VMisc_HS_Extend<"fcvtl", 0b0, 0b10111>;
9001
9002 multiclass NeonI_2VMisc_Extend_Pattern<string prefix> {
9003   def : Pat<(v4f32 (int_arm_neon_vcvthf2fp (v4i16 VPR64:$Rn))),
9004             (!cast<Instruction>(prefix # "4h4s") VPR64:$Rn)>;
9005
9006   def : Pat<(v4f32 (int_arm_neon_vcvthf2fp
9007               (v4i16 (Neon_High8H
9008                 (v8i16 VPR128:$Rn))))),
9009             (!cast<Instruction>(prefix # "8h4s") VPR128:$Rn)>;
9010
9011   def : Pat<(v2f64 (fextend (v2f32 VPR64:$Rn))),
9012             (!cast<Instruction>(prefix # "2s2d") VPR64:$Rn)>;
9013
9014   def : Pat<(v2f64 (fextend
9015               (v2f32 (Neon_High4Float
9016                 (v4f32 VPR128:$Rn))))),
9017             (!cast<Instruction>(prefix # "4s2d") VPR128:$Rn)>;
9018 }
9019
9020 defm : NeonI_2VMisc_Extend_Pattern<"FCVTL">;
9021
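// Common multiclass for the two-operand vector conversions below (fcvt*,
// scvtf/ucvtf, frint*, frecpe, frsqrte, fsqrt): it instantiates the 4s, 2d and
// 2s arrangements with the value types supplied by each user.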
9022 multiclass NeonI_2VMisc_SD_Conv<string asmop, bit Size, bit U, bits<5> opcode,
9023                                 ValueType ResTy4s, ValueType OpTy4s,
9024                                 ValueType ResTy2d, ValueType OpTy2d,
9025                                 ValueType ResTy2s, ValueType OpTy2s,
9026                                 SDPatternOperator Neon_Op> {
9027
9028   def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
9029                         (outs VPR128:$Rd), (ins VPR128:$Rn),
9030                         asmop # "\t$Rd.4s, $Rn.4s",
9031                         [(set (ResTy4s VPR128:$Rd),
9032                            (ResTy4s (Neon_Op (OpTy4s VPR128:$Rn))))],
9033                         NoItinerary>,
9034            Sched<[WriteFPALU, ReadFPALU]>;
9035
9036   def 2d : NeonI_2VMisc<0b1, U, {Size, 0b1}, opcode,
9037                         (outs VPR128:$Rd), (ins VPR128:$Rn),
9038                         asmop # "\t$Rd.2d, $Rn.2d",
9039                         [(set (ResTy2d VPR128:$Rd),
9040                            (ResTy2d (Neon_Op (OpTy2d VPR128:$Rn))))],
9041                         NoItinerary>,
9042            Sched<[WriteFPALU, ReadFPALU]>;
9043
9044   def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
9045                         (outs VPR64:$Rd), (ins VPR64:$Rn),
9046                         asmop # "\t$Rd.2s, $Rn.2s",
9047                         [(set (ResTy2s VPR64:$Rd),
9048                            (ResTy2s (Neon_Op (OpTy2s VPR64:$Rn))))],
9049                         NoItinerary>,
9050            Sched<[WriteFPALU, ReadFPALU]>;
9051 }
9052
9053 multiclass NeonI_2VMisc_fp_to_int<string asmop, bit Size, bit U,
9054                                   bits<5> opcode, SDPatternOperator Neon_Op> {
9055   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4i32, v4f32, v2i64,
9056                                 v2f64, v2i32, v2f32, Neon_Op>;
9057 }
9058
9059 defm FCVTNS : NeonI_2VMisc_fp_to_int<"fcvtns", 0b0, 0b0, 0b11010,
9060                                      int_arm_neon_vcvtns>;
9061 defm FCVTNU : NeonI_2VMisc_fp_to_int<"fcvtnu", 0b0, 0b1, 0b11010,
9062                                      int_arm_neon_vcvtnu>;
9063 defm FCVTPS : NeonI_2VMisc_fp_to_int<"fcvtps", 0b1, 0b0, 0b11010,
9064                                      int_arm_neon_vcvtps>;
9065 defm FCVTPU : NeonI_2VMisc_fp_to_int<"fcvtpu", 0b1, 0b1, 0b11010,
9066                                      int_arm_neon_vcvtpu>;
9067 defm FCVTMS : NeonI_2VMisc_fp_to_int<"fcvtms", 0b0, 0b0, 0b11011,
9068                                      int_arm_neon_vcvtms>;
9069 defm FCVTMU : NeonI_2VMisc_fp_to_int<"fcvtmu", 0b0, 0b1, 0b11011,
9070                                      int_arm_neon_vcvtmu>;
9071 defm FCVTZS : NeonI_2VMisc_fp_to_int<"fcvtzs", 0b1, 0b0, 0b11011, fp_to_sint>;
9072 defm FCVTZU : NeonI_2VMisc_fp_to_int<"fcvtzu", 0b1, 0b1, 0b11011, fp_to_uint>;
9073 defm FCVTAS : NeonI_2VMisc_fp_to_int<"fcvtas", 0b0, 0b0, 0b11100,
9074                                      int_arm_neon_vcvtas>;
9075 defm FCVTAU : NeonI_2VMisc_fp_to_int<"fcvtau", 0b0, 0b1, 0b11100,
9076                                      int_arm_neon_vcvtau>;
9077
9078 multiclass NeonI_2VMisc_int_to_fp<string asmop, bit Size, bit U,
9079                                   bits<5> opcode, SDPatternOperator Neon_Op> {
9080   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4i32, v2f64,
9081                                 v2i64, v2f32, v2i32, Neon_Op>;
9082 }
9083
9084 defm SCVTF : NeonI_2VMisc_int_to_fp<"scvtf", 0b0, 0b0, 0b11101, sint_to_fp>;
9085 defm UCVTF : NeonI_2VMisc_int_to_fp<"ucvtf", 0b0, 0b1, 0b11101, uint_to_fp>;
9086
9087 multiclass NeonI_2VMisc_fp_to_fp<string asmop, bit Size, bit U,
9088                                  bits<5> opcode, SDPatternOperator Neon_Op> {
9089   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4f32, v2f64,
9090                                 v2f64, v2f32, v2f32, Neon_Op>;
9091 }
9092
9093 defm FRINTN : NeonI_2VMisc_fp_to_fp<"frintn", 0b0, 0b0, 0b11000,
9094                                      int_aarch64_neon_frintn>;
9095 defm FRINTA : NeonI_2VMisc_fp_to_fp<"frinta", 0b0, 0b1, 0b11000, frnd>;
9096 defm FRINTP : NeonI_2VMisc_fp_to_fp<"frintp", 0b1, 0b0, 0b11000, fceil>;
9097 defm FRINTM : NeonI_2VMisc_fp_to_fp<"frintm", 0b0, 0b0, 0b11001, ffloor>;
9098 defm FRINTX : NeonI_2VMisc_fp_to_fp<"frintx", 0b0, 0b1, 0b11001, frint>;
9099 defm FRINTZ : NeonI_2VMisc_fp_to_fp<"frintz", 0b1, 0b0, 0b11001, ftrunc>;
9100 defm FRINTI : NeonI_2VMisc_fp_to_fp<"frinti", 0b1, 0b1, 0b11001, fnearbyint>;
9101 defm FRECPE : NeonI_2VMisc_fp_to_fp<"frecpe", 0b1, 0b0, 0b11101,
9102                                     int_arm_neon_vrecpe>;
9103 defm FRSQRTE : NeonI_2VMisc_fp_to_fp<"frsqrte", 0b1, 0b1, 0b11101,
9104                                      int_arm_neon_vrsqrte>;
9105 let SchedRW = [WriteFPSqrt, ReadFPSqrt] in {
9106 defm FSQRT : NeonI_2VMisc_fp_to_fp<"fsqrt", 0b1, 0b1, 0b11111, fsqrt>;
9107 }
9108
9109 multiclass NeonI_2VMisc_S_Conv<string asmop, bit Size, bit U,
9110                                bits<5> opcode, SDPatternOperator Neon_Op> {
9111   def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
9112                         (outs VPR128:$Rd), (ins VPR128:$Rn),
9113                         asmop # "\t$Rd.4s, $Rn.4s",
9114                         [(set (v4i32 VPR128:$Rd),
9115                            (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
9116                         NoItinerary>,
9117            Sched<[WriteFPALU, ReadFPALU]>;
9118
9119   def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
9120                         (outs VPR64:$Rd), (ins VPR64:$Rn),
9121                         asmop # "\t$Rd.2s, $Rn.2s",
9122                         [(set (v2i32 VPR64:$Rd),
9123                            (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
9124                         NoItinerary>,
9125            Sched<[WriteFPALU, ReadFPALU]>;
9126 }
9127
9128 defm URECPE : NeonI_2VMisc_S_Conv<"urecpe", 0b1, 0b0, 0b11100,
9129                                   int_arm_neon_vrecpe>;
9130 defm URSQRTE : NeonI_2VMisc_S_Conv<"ursqrte", 0b1, 0b1, 0b11100,
9131                                    int_arm_neon_vrsqrte>;
9132
9133 // Crypto instruction classes (AES, SHA-1 and SHA-256)
9134 class NeonI_Cryptoaes_2v<bits<2> size, bits<5> opcode,
9135                          string asmop, SDPatternOperator opnode>
9136   : NeonI_Crypto_AES<size, opcode,
9137                      (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
9138                      asmop # "\t$Rd.16b, $Rn.16b",
9139                      [(set (v16i8 VPR128:$Rd),
9140                         (v16i8 (opnode (v16i8 VPR128:$src),
9141                                        (v16i8 VPR128:$Rn))))],
9142                      NoItinerary>,
9143     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
9144   let Constraints = "$src = $Rd";
9145   let Predicates = [HasNEON, HasCrypto];
9146 }
9147
9148 def AESE : NeonI_Cryptoaes_2v<0b00, 0b00100, "aese", int_arm_neon_aese>;
9149 def AESD : NeonI_Cryptoaes_2v<0b00, 0b00101, "aesd", int_arm_neon_aesd>;
9150
9151 class NeonI_Cryptoaes<bits<2> size, bits<5> opcode,
9152                       string asmop, SDPatternOperator opnode>
9153   : NeonI_Crypto_AES<size, opcode,
9154                      (outs VPR128:$Rd), (ins VPR128:$Rn),
9155                      asmop # "\t$Rd.16b, $Rn.16b",
9156                      [(set (v16i8 VPR128:$Rd),
9157                         (v16i8 (opnode (v16i8 VPR128:$Rn))))],
9158                      NoItinerary>,
9159     Sched<[WriteFPALU, ReadFPALU]>;
9160
9161 def AESMC : NeonI_Cryptoaes<0b00, 0b00110, "aesmc", int_arm_neon_aesmc>;
9162 def AESIMC : NeonI_Cryptoaes<0b00, 0b00111, "aesimc", int_arm_neon_aesimc>;
9163
9164 class NeonI_Cryptosha_vv<bits<2> size, bits<5> opcode,
9165                          string asmop, SDPatternOperator opnode>
9166   : NeonI_Crypto_SHA<size, opcode,
9167                      (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
9168                      asmop # "\t$Rd.4s, $Rn.4s",
9169                      [(set (v4i32 VPR128:$Rd),
9170                         (v4i32 (opnode (v4i32 VPR128:$src),
9171                                        (v4i32 VPR128:$Rn))))],
9172                      NoItinerary>,
9173     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
9174   let Constraints = "$src = $Rd";
9175   let Predicates = [HasNEON, HasCrypto];
9176 }
9177
9178 def SHA1SU1 : NeonI_Cryptosha_vv<0b00, 0b00001, "sha1su1",
9179                                  int_arm_neon_sha1su1>;
9180 def SHA256SU0 : NeonI_Cryptosha_vv<0b00, 0b00010, "sha256su0",
9181                                    int_arm_neon_sha256su0>;
9182
9183 class NeonI_Cryptosha_ss<bits<2> size, bits<5> opcode,
9184                          string asmop, SDPatternOperator opnode>
9185   : NeonI_Crypto_SHA<size, opcode,
9186                      (outs FPR32:$Rd), (ins FPR32:$Rn),
9187                      asmop # "\t$Rd, $Rn",
9188                      [], NoItinerary>,
9189     Sched<[WriteFPALU, ReadFPALU]> {
9190   let Predicates = [HasNEON, HasCrypto];
9191   let hasSideEffects = 0;
9192 }
9193
9194 def SHA1H : NeonI_Cryptosha_ss<0b00, 0b00000, "sha1h", int_arm_neon_sha1h>;
9195 def : Pat<(i32 (int_arm_neon_sha1h i32:$Rn)),
9196           (COPY_TO_REGCLASS (SHA1H (COPY_TO_REGCLASS i32:$Rn, FPR32)), GPR32)>;
9197
9198
9199 class NeonI_Cryptosha3_vvv<bits<2> size, bits<3> opcode, string asmop,
9200                            SDPatternOperator opnode>
9201   : NeonI_Crypto_3VSHA<size, opcode,
9202                        (outs VPR128:$Rd),
9203                        (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
9204                        asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
9205                        [(set (v4i32 VPR128:$Rd),
9206                           (v4i32 (opnode (v4i32 VPR128:$src),
9207                                          (v4i32 VPR128:$Rn),
9208                                          (v4i32 VPR128:$Rm))))],
9209                        NoItinerary>,
9210     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
9211   let Constraints = "$src = $Rd";
9212   let Predicates = [HasNEON, HasCrypto];
9213 }
9214
9215 def SHA1SU0 : NeonI_Cryptosha3_vvv<0b00, 0b011, "sha1su0",
9216                                    int_arm_neon_sha1su0>;
9217 def SHA256SU1 : NeonI_Cryptosha3_vvv<0b00, 0b110, "sha256su1",
9218                                      int_arm_neon_sha256su1>;
9219
9220 class NeonI_Cryptosha3_qqv<bits<2> size, bits<3> opcode, string asmop,
9221                            SDPatternOperator opnode>
9222   : NeonI_Crypto_3VSHA<size, opcode,
9223                        (outs FPR128:$Rd),
9224                        (ins FPR128:$src, FPR128:$Rn, VPR128:$Rm),
9225                        asmop # "\t$Rd, $Rn, $Rm.4s",
9226                        [(set (v4i32 FPR128:$Rd),
9227                           (v4i32 (opnode (v4i32 FPR128:$src),
9228                                          (v4i32 FPR128:$Rn),
9229                                          (v4i32 VPR128:$Rm))))],
9230                        NoItinerary>,
9231     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
9232   let Constraints = "$src = $Rd";
9233   let Predicates = [HasNEON, HasCrypto];
9234 }
9235
9236 def SHA256H : NeonI_Cryptosha3_qqv<0b00, 0b100, "sha256h",
9237                                    int_arm_neon_sha256h>;
9238 def SHA256H2 : NeonI_Cryptosha3_qqv<0b00, 0b101, "sha256h2",
9239                                     int_arm_neon_sha256h2>;
9240
9241 class NeonI_Cryptosha3_qsv<bits<2> size, bits<3> opcode, string asmop>
9242   : NeonI_Crypto_3VSHA<size, opcode,
9243                        (outs FPR128:$Rd),
9244                        (ins FPR128:$src, FPR32:$Rn, VPR128:$Rm),
9245                        asmop # "\t$Rd, $Rn, $Rm.4s",
9246                        [], NoItinerary>,
9247     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
9248   let Constraints = "$src = $Rd";
9249   let hasSideEffects = 0;
9250   let Predicates = [HasNEON, HasCrypto];
9251 }
9252
9253 def SHA1C : NeonI_Cryptosha3_qsv<0b00, 0b000, "sha1c">;
9254 def SHA1P : NeonI_Cryptosha3_qsv<0b00, 0b001, "sha1p">;
9255 def SHA1M : NeonI_Cryptosha3_qsv<0b00, 0b010, "sha1m">;
9256
9257 def : Pat<(int_arm_neon_sha1c v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
9258           (SHA1C v4i32:$hash_abcd,
9259                  (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
9260 def : Pat<(int_arm_neon_sha1m v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
9261           (SHA1M v4i32:$hash_abcd,
9262                  (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
9263 def : Pat<(int_arm_neon_sha1p v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
9264           (SHA1P v4i32:$hash_abcd,
9265                  (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
9266
9267 // Additional patterns to match shl to USHL.
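// For example, a v8i8 shl by a vector amount becomes ushl v0.8b, v1.8b, v2.8b;
// the v1i8/v1i16/v1i32 cases below first widen the scalar operands into 64-bit
// vectors with SUBREG_TO_REG.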
9268 def : Pat<(v8i8 (shl (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
9269           (USHLvvv_8B $Rn, $Rm)>;
9270 def : Pat<(v4i16 (shl (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
9271           (USHLvvv_4H $Rn, $Rm)>;
9272 def : Pat<(v2i32 (shl (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
9273           (USHLvvv_2S $Rn, $Rm)>;
9274 def : Pat<(v1i64 (shl (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
9275           (USHLddd $Rn, $Rm)>;
9276 def : Pat<(v16i8 (shl (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
9277           (USHLvvv_16B $Rn, $Rm)>;
9278 def : Pat<(v8i16 (shl (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
9279           (USHLvvv_8H $Rn, $Rm)>;
9280 def : Pat<(v4i32 (shl (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
9281           (USHLvvv_4S $Rn, $Rm)>;
9282 def : Pat<(v2i64 (shl (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
9283           (USHLvvv_2D $Rn, $Rm)>;
9284
9285 def : Pat<(v1i8 (shl (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
9286           (EXTRACT_SUBREG
9287               (USHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
9288                           (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
9289               sub_8)>;
9290 def : Pat<(v1i16 (shl (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
9291           (EXTRACT_SUBREG
9292               (USHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
9293                           (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
9294               sub_16)>;
9295 def : Pat<(v1i32 (shl (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
9296           (EXTRACT_SUBREG
9297               (USHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
9298                           (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
9299               sub_32)>;
9300
9301 // Additional patterns to match sra and srl.
9302 // SSHL/USHL shift right when the per-lane shift amount is negative, so a
9303 // vector right shift by a vector amount negates the shift-amount vector first.
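// For example, a v8i8 srl becomes a NEG of the shift-amount register followed
// by a USHL of the negated amount; sra uses SSHL in the same way.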
9304 def : Pat<(v8i8 (srl (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
9305           (USHLvvv_8B $Rn, (NEG8b $Rm))>;
9306 def : Pat<(v4i16 (srl (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
9307           (USHLvvv_4H $Rn, (NEG4h $Rm))>;
9308 def : Pat<(v2i32 (srl (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
9309           (USHLvvv_2S $Rn, (NEG2s $Rm))>;
9310 def : Pat<(v1i64 (srl (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
9311           (USHLddd $Rn, (NEGdd $Rm))>;
9312 def : Pat<(v16i8 (srl (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
9313           (USHLvvv_16B $Rn, (NEG16b $Rm))>;
9314 def : Pat<(v8i16 (srl (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
9315           (USHLvvv_8H $Rn, (NEG8h $Rm))>;
9316 def : Pat<(v4i32 (srl (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
9317           (USHLvvv_4S $Rn, (NEG4s $Rm))>;
9318 def : Pat<(v2i64 (srl (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
9319           (USHLvvv_2D $Rn, (NEG2d $Rm))>;
9320
def : Pat<(v1i8 (srl (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
          (EXTRACT_SUBREG
              (USHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
                          (NEG8b (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8))),
              sub_8)>;
def : Pat<(v1i16 (srl (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
          (EXTRACT_SUBREG
              (USHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
                          (NEG4h (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16))),
              sub_16)>;
def : Pat<(v1i32 (srl (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
          (EXTRACT_SUBREG
              (USHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
                          (NEG2s (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32))),
              sub_32)>;

def : Pat<(v8i8 (sra (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
          (SSHLvvv_8B $Rn, (NEG8b $Rm))>;
def : Pat<(v4i16 (sra (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
          (SSHLvvv_4H $Rn, (NEG4h $Rm))>;
def : Pat<(v2i32 (sra (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
          (SSHLvvv_2S $Rn, (NEG2s $Rm))>;
def : Pat<(v1i64 (sra (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
          (SSHLddd $Rn, (NEGdd $Rm))>;
def : Pat<(v16i8 (sra (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
          (SSHLvvv_16B $Rn, (NEG16b $Rm))>;
def : Pat<(v8i16 (sra (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
          (SSHLvvv_8H $Rn, (NEG8h $Rm))>;
def : Pat<(v4i32 (sra (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
          (SSHLvvv_4S $Rn, (NEG4s $Rm))>;
def : Pat<(v2i64 (sra (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
          (SSHLvvv_2D $Rn, (NEG2d $Rm))>;

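// For illustration only (a hedged sketch; register numbers are arbitrary):
// a variable arithmetic right shift such as
//   %r = ashr <8 x i16> %a, %b
// is expected to select to roughly
//   neg  v1.8h, v1.8h
//   sshl v0.8h, v0.8h, v1.8h
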
def : Pat<(v1i8 (sra (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
          (EXTRACT_SUBREG
              (SSHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
                          (NEG8b (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8))),
              sub_8)>;
def : Pat<(v1i16 (sra (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
          (EXTRACT_SUBREG
              (SSHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
                          (NEG4h (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16))),
              sub_16)>;
def : Pat<(v1i32 (sra (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
          (EXTRACT_SUBREG
              (SSHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
                          (NEG2s (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32))),
              sub_32)>;

//
// Patterns for handling half-precision values
//

// Convert between an f16 value and an f32 value
def : Pat<(f32 (f16_to_f32 (i32 GPR32:$Rn))),
          (FCVTsh (EXTRACT_SUBREG (FMOVsw $Rn), sub_16))>;
def : Pat<(i32 (f32_to_f16 (f32 FPR32:$Rn))),
          (FMOVws (SUBREG_TO_REG (i64 0), (f16 (FCVThs $Rn)), sub_16))>;

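// For illustration only (a hedged sketch; register numbers are arbitrary):
// widening an f16 whose raw bits sit in a GPR, and narrowing an f32 back to
// f16 bits in a GPR, are expected to select to roughly
//   fmov s0, w0        // move the f16 bits into the FP/SIMD register file
//   fcvt s0, h0        // half -> single
// and
//   fcvt h0, s0        // single -> half
//   fmov w0, s0        // f16 bits returned in the low half of w0
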
// Convert an f16 value coming in as an i16 value to f32
def : Pat<(f32 (f16_to_f32 (i32 (and (i32 GPR32:$Rn), 65535)))),
          (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;
def : Pat<(f32 (f16_to_f32 (i32 (assertzext GPR32:$Rn)))),
          (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;

def : Pat<(f32 (f16_to_f32 (i32 (assertzext (i32 (
            f32_to_f16 (f32 FPR32:$Rn))))))),
          (f32 FPR32:$Rn)>;

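// Note (descriptive, added for clarity): the masked and assertzext forms
// above cover the case where the front end hands over the f16 bits
// zero-extended inside an i32; since FCVTsh only reads the low 16 bits (the
// sub_16 subregister of the S register written by FMOVsw), the explicit mask
// can simply be dropped during selection. The last pattern folds an immediate
// f32 -> f16 -> f32 round trip back to the original f32 register.
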
// Patterns for vector extract of half-precision FP value in i16 storage type
def : Pat<(f32 (f16_to_f32 (i32 (and (i32 (vector_extract
            (v4i16 VPR64:$Rn), neon_uimm2_bare:$Imm)), 65535)))),
          (FCVTsh (f16 (DUPhv_H
            (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
            neon_uimm2_bare:$Imm)))>;

def : Pat<(f32 (f16_to_f32 (i32 (and (i32 (vector_extract
            (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)), 65535)))),
          (FCVTsh (f16 (DUPhv_H (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)))>;

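// For illustration only (a hedged sketch; register numbers and lane indices
// are arbitrary): extracting an f16 held in lane 2 of a v8i16 and widening it
// to f32 is expected to select to roughly
//   dup  h0, v0.h[2]   // copy the 16-bit lane into a scalar H register
//   fcvt s0, h0        // half -> single
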
// Patterns for vector insert of half-precision FP value 0 in i16 storage type
def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
            (i32 (assertsext (i32 (fp_to_sint (f32 (f16_to_f32 (i32 0))))))),
            (neon_uimm3_bare:$Imm))),
          (v8i16 (INSELh (v8i16 VPR128:$Rn),
            (v8i16 (SUBREG_TO_REG (i64 0),
              (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
              sub_16)),
            neon_uimm3_bare:$Imm, 0))>;

def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
            (i32 (assertsext (i32 (fp_to_sint (f32 (f16_to_f32 (i32 0))))))),
            (neon_uimm2_bare:$Imm))),
          (v4i16 (EXTRACT_SUBREG
            (v8i16 (INSELh
              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
              (v8i16 (SUBREG_TO_REG (i64 0),
                (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
                sub_16)),
              neon_uimm2_bare:$Imm, 0)),
            sub_64))>;

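// For illustration only (a hedged sketch; register numbers and lane indices
// are arbitrary): writing a half-precision zero into lane 3 of a v8i16 is
// expected to select to roughly
//   fmov s1, wzr       // materialize zero in the FP/SIMD register file
//   ins  v0.h[3], v1.h[0]
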
// Patterns for vector insert of half-precision FP value in i16 storage type
def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
            (i32 (assertsext (i32 (fp_to_sint
              (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
            (neon_uimm3_bare:$Imm))),
          (v8i16 (INSELh (v8i16 VPR128:$Rn),
            (v8i16 (SUBREG_TO_REG (i64 0),
              (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
              sub_16)),
            neon_uimm3_bare:$Imm, 0))>;

def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
            (i32 (assertsext (i32 (fp_to_sint
              (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
            (neon_uimm2_bare:$Imm))),
          (v4i16 (EXTRACT_SUBREG
            (v8i16 (INSELh
              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
              (v8i16 (SUBREG_TO_REG (i64 0),
                (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
                sub_16)),
              neon_uimm2_bare:$Imm, 0)),
            sub_64))>;

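// For illustration only (a hedged sketch; register numbers and lane indices
// are arbitrary): the patterns above recognize this front-end idiom for
// writing a half-precision value, whose bits arrive zero-extended in a GPR,
// into an i16 lane, and select it to roughly
//   fmov s1, w0
//   ins  v0.h[5], v1.h[0]
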
def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
            (i32 (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
            (neon_uimm3_bare:$Imm1))),
          (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
            neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;

// Patterns for vector copy of half-precision FP value in i16 storage type
def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
            (i32 (assertsext (i32 (fp_to_sint (f32 (f16_to_f32 (i32 (and (i32
              (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
              65535)))))))),
            (neon_uimm3_bare:$Imm1))),
          (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
            neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;

def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
            (i32 (assertsext (i32 (fp_to_sint (f32 (f16_to_f32 (i32 (and (i32
              (vector_extract (v4i16 VPR64:$src), neon_uimm3_bare:$Imm2)),
              65535)))))))),
            (neon_uimm3_bare:$Imm1))),
          (v4i16 (EXTRACT_SUBREG
            (v8i16 (INSELh
              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
              neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2)),
            sub_64))>;

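// For illustration only (a hedged sketch; register numbers and lane indices
// are arbitrary): both the plain lane copy above and the half-precision copy
// patterns reduce to a single element-to-element move, e.g.
//   ins v0.h[1], v1.h[4]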