1 //===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the AArch64 NEON instruction set.
11 //
12 //===----------------------------------------------------------------------===//
13
14 //===----------------------------------------------------------------------===//
15 // NEON-specific DAG Nodes.
16 //===----------------------------------------------------------------------===//
17
18 // (outs Result), (ins Imm, OpCmode)
19 def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
20
21 def Neon_movi     : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
22
23 def Neon_mvni     : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
24
25 // (outs Result), (ins Imm)
26 def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
27                         [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
28
29 // (outs Result), (ins LHS, RHS, CondCode)
30 def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
31                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
32
33 // (outs Result), (ins LHS, 0/0.0 constant, CondCode)
34 def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
35                  [SDTCisVec<0>,  SDTCisVec<1>]>>;
36
37 // (outs Result), (ins LHS, RHS)
38 def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
39                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
40
41 def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
42                                      SDTCisVT<2, i32>]>;
43 def Neon_sqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
44 def Neon_uqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
45
46 def SDTPERMUTE : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
47                                SDTCisSameAs<0, 2>]>;
48 def Neon_uzp1    : SDNode<"AArch64ISD::NEON_UZP1", SDTPERMUTE>;
49 def Neon_uzp2    : SDNode<"AArch64ISD::NEON_UZP2", SDTPERMUTE>;
50 def Neon_zip1    : SDNode<"AArch64ISD::NEON_ZIP1", SDTPERMUTE>;
51 def Neon_zip2    : SDNode<"AArch64ISD::NEON_ZIP2", SDTPERMUTE>;
52 def Neon_trn1    : SDNode<"AArch64ISD::NEON_TRN1", SDTPERMUTE>;
53 def Neon_trn2    : SDNode<"AArch64ISD::NEON_TRN2", SDTPERMUTE>;
54
55 def SDTVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
56 def Neon_rev64    : SDNode<"AArch64ISD::NEON_REV64", SDTVSHUF>;
57 def Neon_rev32    : SDNode<"AArch64ISD::NEON_REV32", SDTVSHUF>;
58 def Neon_rev16    : SDNode<"AArch64ISD::NEON_REV16", SDTVSHUF>;
59 def Neon_vdup : SDNode<"AArch64ISD::NEON_VDUP", SDTypeProfile<1, 1,
60                        [SDTCisVec<0>]>>;
61 def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
62                            [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
63 def Neon_vextract : SDNode<"AArch64ISD::NEON_VEXTRACT", SDTypeProfile<1, 3,
64                            [SDTCisVec<0>,  SDTCisSameAs<0, 1>,
65                            SDTCisSameAs<0, 2>, SDTCisVT<3, i64>]>>;
66
67 //===----------------------------------------------------------------------===//
68 // Addressing-mode instantiations
69 //===----------------------------------------------------------------------===//
70
71 multiclass ls_64_pats<dag address, dag Base, dag Offset, ValueType Ty> {
72 defm : ls_neutral_pats<LSFP64_LDR, LSFP64_STR, Base,
73                       !foreach(decls.pattern, Offset,
74                                !subst(OFFSET, dword_uimm12, decls.pattern)),
75                       !foreach(decls.pattern, address,
76                                !subst(OFFSET, dword_uimm12,
77                                !subst(ALIGN, min_align8, decls.pattern))),
78                       Ty>;
79 }
80
81 multiclass ls_128_pats<dag address, dag Base, dag Offset, ValueType Ty> {
82 defm : ls_neutral_pats<LSFP128_LDR, LSFP128_STR, Base,
83                        !foreach(decls.pattern, Offset,
84                                 !subst(OFFSET, qword_uimm12, decls.pattern)),
85                        !foreach(decls.pattern, address,
86                                 !subst(OFFSET, qword_uimm12,
87                                 !subst(ALIGN, min_align16, decls.pattern))),
88                       Ty>;
89 }
90
91 multiclass uimm12_neon_pats<dag address, dag Base, dag Offset> {
92   defm : ls_64_pats<address, Base, Offset, v8i8>;
93   defm : ls_64_pats<address, Base, Offset, v4i16>;
94   defm : ls_64_pats<address, Base, Offset, v2i32>;
95   defm : ls_64_pats<address, Base, Offset, v1i64>;
96   defm : ls_64_pats<address, Base, Offset, v2f32>;
97   defm : ls_64_pats<address, Base, Offset, v1f64>;
98
99   defm : ls_128_pats<address, Base, Offset, v16i8>;
100   defm : ls_128_pats<address, Base, Offset, v8i16>;
101   defm : ls_128_pats<address, Base, Offset, v4i32>;
102   defm : ls_128_pats<address, Base, Offset, v2i64>;
103   defm : ls_128_pats<address, Base, Offset, v4f32>;
104   defm : ls_128_pats<address, Base, Offset, v2f64>;
105 }
106
107 defm : uimm12_neon_pats<(A64WrapperSmall
108                           tconstpool:$Hi, tconstpool:$Lo12, ALIGN),
109                         (ADRPxi tconstpool:$Hi), (i64 tconstpool:$Lo12)>;
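// Illustrative note: these instantiations cover the usual small-code-model
// constant-pool access sequence, roughly
//   adrp  x8, .LCPI0_0
//   ldr   q0, [x8, #:lo12:.LCPI0_0]
// where ADRPxi materialises the page address and the uimm12 offset carries
// the :lo12: part of the label (register and label names here are examples).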
110
111 //===----------------------------------------------------------------------===//
112 // Multiclasses
113 //===----------------------------------------------------------------------===//
114
115 multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size,  bits<5> opcode,
116                                 string asmop, SDPatternOperator opnode8B,
117                                 SDPatternOperator opnode16B,
118                                 bit Commutable = 0> {
119   let isCommutable = Commutable in {
120     def _8B :  NeonI_3VSame<0b0, u, size, opcode,
121                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
122                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
123                [(set (v8i8 VPR64:$Rd),
124                   (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
125                NoItinerary>,
126                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
127
128     def _16B : NeonI_3VSame<0b1, u, size, opcode,
129                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
130                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
131                [(set (v16i8 VPR128:$Rd),
132                   (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
133                NoItinerary>,
134                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
135   }
136
137 }
138
139 multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
140                                   string asmop, SDPatternOperator opnode,
141                                   bit Commutable = 0> {
142   let isCommutable = Commutable in {
143     def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
144               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
145               asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
146               [(set (v4i16 VPR64:$Rd),
147                  (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
148               NoItinerary>,
149               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
150
151     def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
152               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
153               asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
154               [(set (v8i16 VPR128:$Rd),
155                  (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
156               NoItinerary>,
157               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
158
159     def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
160               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
161               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
162               [(set (v2i32 VPR64:$Rd),
163                  (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
164               NoItinerary>,
165               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
166
167     def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
168               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
169               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
170               [(set (v4i32 VPR128:$Rd),
171                  (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
172               NoItinerary>,
173               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
174   }
175 }
176 multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
177                                   string asmop, SDPatternOperator opnode,
178                                   bit Commutable = 0>
179    : NeonI_3VSame_HS_sizes<u, opcode,  asmop, opnode, Commutable> {
180   let isCommutable = Commutable in {
181     def _8B :  NeonI_3VSame<0b0, u, 0b00, opcode,
182                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
183                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
184                [(set (v8i8 VPR64:$Rd),
185                   (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
186                NoItinerary>,
187                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
188
189     def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
190                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
191                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
192                [(set (v16i8 VPR128:$Rd),
193                   (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
194                NoItinerary>,
195                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
196   }
197 }
198
199 multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
200                                    string asmop, SDPatternOperator opnode,
201                                    bit Commutable = 0>
202    : NeonI_3VSame_BHS_sizes<u, opcode,  asmop, opnode, Commutable> {
203   let isCommutable = Commutable in {
204     def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
205               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
206               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
207               [(set (v2i64 VPR128:$Rd),
208                  (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
209               NoItinerary>,
210               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
211   }
212 }
213
214 // Multiclass NeonI_3VSame_SD_sizes: operand types are floating-point types,
215 // but result types can be integer or floating-point types.
216 multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
217                                  string asmop, SDPatternOperator opnode,
218                                  ValueType ResTy2S, ValueType ResTy4S,
219                                  ValueType ResTy2D, bit Commutable = 0> {
220   let isCommutable = Commutable in {
221     def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
222               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
223               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
224               [(set (ResTy2S VPR64:$Rd),
225                  (ResTy2S (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
226               NoItinerary>,
227               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
228
229     def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
230               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
231               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
232               [(set (ResTy4S VPR128:$Rd),
233                  (ResTy4S (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
234               NoItinerary>,
235               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
236
237     def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
238               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
239               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
240               [(set (ResTy2D VPR128:$Rd),
241                  (ResTy2D (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
242               NoItinerary>,
243               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
244   }
245 }
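// For example, FADDvvv below instantiates this multiclass with floating-point
// result types (v2f32/v4f32/v2f64), while the FCMEQ/FCMGE/FCMGT comparisons
// reuse it with integer mask results (v2i32/v4i32/v2i64) over the same
// floating-point operands.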
246
247 //===----------------------------------------------------------------------===//
248 // Instruction Definitions
249 //===----------------------------------------------------------------------===//
250
251 // Vector Arithmetic Instructions
252
253 // Vector Add (Integer and Floating-Point)
254
255 defm ADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
256 defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd,
257                                      v2f32, v4f32, v2f64, 1>;
258
259 // Patterns to match add of v1i8/v1i16/v1i32 types
260 def : Pat<(v1i8 (add FPR8:$Rn, FPR8:$Rm)),
261           (EXTRACT_SUBREG
262               (ADDvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
263                          (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
264               sub_8)>;
265 def : Pat<(v1i16 (add FPR16:$Rn, FPR16:$Rm)),
266           (EXTRACT_SUBREG
267               (ADDvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
268                          (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
269               sub_16)>;
270 def : Pat<(v1i32 (add FPR32:$Rn, FPR32:$Rm)),
271           (EXTRACT_SUBREG
272               (ADDvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
273                          (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
274               sub_32)>;
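// Note: the patterns above widen a scalar v1iN add to the matching 64-bit
// vector add, e.g. a v1i8 add is selected as something like
//   add v0.8b, v0.8b, v1.8b
// with only the lowest lane of the result significant; SUBREG_TO_REG and
// EXTRACT_SUBREG are register-class bookkeeping and emit no extra arithmetic.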
275
276 // Vector Sub (Integer and Floating-Point)
277
278 defm SUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
279 defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub,
280                                      v2f32, v4f32, v2f64, 0>;
281
282 // Patterns to match sub of v1i8/v1i16/v1i32 types
283 def : Pat<(v1i8 (sub FPR8:$Rn, FPR8:$Rm)),
284           (EXTRACT_SUBREG
285               (SUBvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
286                          (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
287               sub_8)>;
288 def : Pat<(v1i16 (sub FPR16:$Rn, FPR16:$Rm)),
289           (EXTRACT_SUBREG
290               (SUBvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
291                          (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
292               sub_16)>;
293 def : Pat<(v1i32 (sub FPR32:$Rn, FPR32:$Rm)),
294           (EXTRACT_SUBREG
295               (SUBvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
296                          (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
297               sub_32)>;
298
299 // Vector Multiply (Integer and Floating-Point)
300
301 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
302 defm MULvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
303 defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul,
304                                      v2f32, v4f32, v2f64, 1>;
305 }
306
307 // Patterns to match mul of v1i8/v1i16/v1i32 types
308 def : Pat<(v1i8 (mul FPR8:$Rn, FPR8:$Rm)),
309           (EXTRACT_SUBREG 
310               (MULvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
311                          (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
312               sub_8)>;
313 def : Pat<(v1i16 (mul FPR16:$Rn, FPR16:$Rm)),
314           (EXTRACT_SUBREG 
315               (MULvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
316                          (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
317               sub_16)>;
318 def : Pat<(v1i32 (mul FPR32:$Rn, FPR32:$Rm)),
319           (EXTRACT_SUBREG
320               (MULvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
321                          (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
322               sub_32)>;
323
324 // Vector Multiply (Polynomial)
325
326 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
327 defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
328                                     int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
329 }
330
331 // Vector Multiply-accumulate and Multiply-subtract (Integer)
332
333 // class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no fixed data type
334 // and a constraint tying two of its operands ($src = $Rd).
335 class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
336   RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size,
337   bits<5> opcode, SDPatternOperator opnode>
338   : NeonI_3VSame<q, u, size, opcode,
339     (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
340     asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
341     [(set (OpTy VPRC:$Rd),
342        (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
343     NoItinerary>,
344     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
345   let Constraints = "$src = $Rd";
346 }
347
348 def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
349                        (add node:$Ra, (mul node:$Rn, node:$Rm))>;
350
351 def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
352                        (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
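// These fragments let a separate (add/sub x, (mul y, z)) DAG be folded into
// the accumulating MLA/MLS instructions defined below.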
353
354
355 let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC] in {
356 def MLAvvv_8B:  NeonI_3VSame_Constraint_impl<"mla", ".8b",  VPR64,  v8i8,
357                                              0b0, 0b0, 0b00, 0b10010, Neon_mla>;
358 def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
359                                              0b1, 0b0, 0b00, 0b10010, Neon_mla>;
360 def MLAvvv_4H:  NeonI_3VSame_Constraint_impl<"mla", ".4h",  VPR64,  v4i16,
361                                              0b0, 0b0, 0b01, 0b10010, Neon_mla>;
362 def MLAvvv_8H:  NeonI_3VSame_Constraint_impl<"mla", ".8h",  VPR128, v8i16,
363                                              0b1, 0b0, 0b01, 0b10010, Neon_mla>;
364 def MLAvvv_2S:  NeonI_3VSame_Constraint_impl<"mla", ".2s",  VPR64,  v2i32,
365                                              0b0, 0b0, 0b10, 0b10010, Neon_mla>;
366 def MLAvvv_4S:  NeonI_3VSame_Constraint_impl<"mla", ".4s",  VPR128, v4i32,
367                                              0b1, 0b0, 0b10, 0b10010, Neon_mla>;
368
369 def MLSvvv_8B:  NeonI_3VSame_Constraint_impl<"mls", ".8b",  VPR64,  v8i8,
370                                              0b0, 0b1, 0b00, 0b10010, Neon_mls>;
371 def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
372                                              0b1, 0b1, 0b00, 0b10010, Neon_mls>;
373 def MLSvvv_4H:  NeonI_3VSame_Constraint_impl<"mls", ".4h",  VPR64,  v4i16,
374                                              0b0, 0b1, 0b01, 0b10010, Neon_mls>;
375 def MLSvvv_8H:  NeonI_3VSame_Constraint_impl<"mls", ".8h",  VPR128, v8i16,
376                                              0b1, 0b1, 0b01, 0b10010, Neon_mls>;
377 def MLSvvv_2S:  NeonI_3VSame_Constraint_impl<"mls", ".2s",  VPR64,  v2i32,
378                                              0b0, 0b1, 0b10, 0b10010, Neon_mls>;
379 def MLSvvv_4S:  NeonI_3VSame_Constraint_impl<"mls", ".4s",  VPR128, v4i32,
380                                              0b1, 0b1, 0b10, 0b10010, Neon_mls>;
381 }
382
383 // Vector Multiply-accumulate and Multiply-subtract (Floating Point)
384
385 def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
386                         (fadd node:$Ra, (fmul_su node:$Rn, node:$Rm))>;
387
388 def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
389                         (fsub node:$Ra, (fmul_su node:$Rn, node:$Rm))>;
390
391 let Predicates = [HasNEON, UseFusedMAC],
392     SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC] in {
393 def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s",  VPR64,  v2f32,
394                                              0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
395 def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s",  VPR128, v4f32,
396                                              0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
397 def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d",  VPR128, v2f64,
398                                              0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
399
400 def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s",  VPR64,  v2f32,
401                                               0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
402 def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s",  VPR128, v4f32,
403                                              0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
404 def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d",  VPR128, v2f64,
405                                              0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
406 }
407
408 // We're also allowed to match the fma instruction regardless of compile
409 // options.
410 def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
411           (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
412 def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
413           (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
414 def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
415           (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
416
417 def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
418           (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
419 def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
420           (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
421 def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
422           (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
423
424 // Vector Divide (Floating-Point)
425
426 let SchedRW = [WriteFPDiv, ReadFPDiv, ReadFPDiv] in {
427 defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv,
428                                      v2f32, v4f32, v2f64, 0>;
429 }
430
431 // Vector Bitwise Operations
432
433 // Vector Bitwise AND
434
435 defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
436
437 // Vector Bitwise Exclusive OR
438
439 defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
440
441 // Vector Bitwise OR
442
443 defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
444
445 // ORR disassembled as MOV if Vn==Vm
446
447 // Vector Move - register
448 // Alias for ORR if Vn=Vm.
449 // FIXME: This is actually the preferred syntax but TableGen can't deal with
450 // custom printing of aliases.
451 def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
452                     (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
453 def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
454                     (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
455
456 // The MOVI instruction takes two immediate operands.  The first is the
457 // immediate encoding, while the second is the cmode.  A cmode of 14, or
458 // 0b1110, produces a MOVI operation, rather than an MVNI, ORR, or BIC.
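// For example, with cmode 0b1110 the 8-bit immediate is replicated into every
// byte lane, so the (255, 14) encoding used by Neon_AllOne corresponds to
//   movi v0.16b, #0xff
// i.e. an all-ones vector.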
459 def Neon_AllZero : PatFrag<(ops), (Neon_movi (i32 0), (i32 14))>;
460 def Neon_AllOne : PatFrag<(ops), (Neon_movi (i32 255), (i32 14))>;
461
462 def Neon_not8B  : PatFrag<(ops node:$in),
463                           (xor node:$in, (bitconvert (v8i8 Neon_AllOne)))>;
464 def Neon_not16B : PatFrag<(ops node:$in),
465                           (xor node:$in, (bitconvert (v16i8 Neon_AllOne)))>;
466
467 def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
468                          (or node:$Rn, (Neon_not8B node:$Rm))>;
469
470 def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
471                           (or node:$Rn, (Neon_not16B node:$Rm))>;
472
473 def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
474                          (and node:$Rn, (Neon_not8B node:$Rm))>;
475
476 def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
477                           (and node:$Rn, (Neon_not16B node:$Rm))>;
478
479
480 // Vector Bitwise OR NOT - register
481
482 defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
483                                    Neon_orn8B, Neon_orn16B, 0>;
484
485 // Vector Bitwise Bit Clear (AND NOT) - register
486
487 defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
488                                    Neon_bic8B, Neon_bic16B, 0>;
489
490 multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
491                                    SDPatternOperator opnode16B,
492                                    Instruction INST8B,
493                                    Instruction INST16B> {
494   def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
495             (INST8B VPR64:$Rn, VPR64:$Rm)>;
496   def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
497             (INST8B VPR64:$Rn, VPR64:$Rm)>;
498   def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
499             (INST8B VPR64:$Rn, VPR64:$Rm)>;
500   def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
501             (INST16B VPR128:$Rn, VPR128:$Rm)>;
502   def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
503             (INST16B VPR128:$Rn, VPR128:$Rm)>;
504   def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
505             (INST16B VPR128:$Rn, VPR128:$Rm)>;
506 }
507
508 // Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
509 defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
510 defm : Neon_bitwise2V_patterns<or,  or,  ORRvvv_8B, ORRvvv_16B>;
511 defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
512 defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
513 defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
514
515 //   Vector Bitwise Select
516 def BSLvvv_8B  : NeonI_3VSame_Constraint_impl<"bsl", ".8b",  VPR64, v8i8,
517                                               0b0, 0b1, 0b01, 0b00011, vselect>;
518
519 def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
520                                               0b1, 0b1, 0b01, 0b00011, vselect>;
521
522 multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
523                                    Instruction INST8B,
524                                    Instruction INST16B> {
525   // Disassociate type from instruction definition
526   def : Pat<(v8i8 (opnode (v8i8 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
527             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
528   def : Pat<(v2i32 (opnode (v2i32 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
529             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
530   def : Pat<(v2f32 (opnode (v2i32 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
531             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
532   def : Pat<(v4i16 (opnode (v4i16 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
533             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
534   def : Pat<(v1i64 (opnode (v1i64 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
535             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
536   def : Pat<(v1f64 (opnode (v1i64 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
537             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
538   def : Pat<(v16i8 (opnode (v16i8 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
539             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
540   def : Pat<(v4i32 (opnode (v4i32 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
541             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
542   def : Pat<(v8i16 (opnode (v8i16 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
543             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
544   def : Pat<(v2i64 (opnode (v2i64 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
545             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
546   def : Pat<(v2f64 (opnode (v2i64 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
547             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
548   def : Pat<(v4f32 (opnode (v4i32 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
549             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
550
551   // Allow matching the BSL instruction pattern with a non-constant operand
552   def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
553                     (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
554           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
555   def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
556                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
557           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
558   def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
559                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
560           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
561   def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
562                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
563           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
564   def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
565                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
566           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
567   def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
568                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
569           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
570   def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
571                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
572           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
573   def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
574                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
575           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
576
577   // Allow matching llvm.arm.* intrinsics.
578   def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
579                     (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
580             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
581   def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
582                     (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
583             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
584   def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
585                     (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
586             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
587   def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
588                     (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
589             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
590   def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
591                     (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
592             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
593   def : Pat<(v1f64 (int_arm_neon_vbsl (v1f64 VPR64:$src),
594                     (v1f64 VPR64:$Rn), (v1f64 VPR64:$Rm))),
595             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
596   def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
597                     (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
598             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
599   def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
600                     (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
601             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
602   def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
603                     (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
604             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
605   def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
606                     (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
607             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
608   def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
609                     (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
610             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
611   def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
612                     (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
613             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
614 }
615
616 // Additional patterns for bitwise instruction BSL
617 defm: Neon_bitwise3V_patterns<vselect, BSLvvv_8B, BSLvvv_16B>;
618
619 def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
620                            (vselect node:$src, node:$Rn, node:$Rm),
621                            [{ (void)N; return false; }]>;
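// Note: the predicate above always returns false, so Neon_NoBSLop never
// matches during instruction selection; the BIT/BIF definitions below
// therefore mainly serve the assembler and disassembler.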
622
623 // Vector Bitwise Insert if True
624
625 def BITvvv_8B  : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64,   v8i8,
626                    0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
627 def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
628                    0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
629
630 // Vector Bitwise Insert if False
631
632 def BIFvvv_8B  : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64,  v8i8,
633                                 0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
634 def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
635                                 0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
636
637 // Vector Absolute Difference and Accumulate (Signed, Unsigned)
638
639 def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
640                        (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
641 def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
642                        (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
643
644 // Vector Absolute Difference and Accumulate (Unsigned)
645 def UABAvvv_8B :  NeonI_3VSame_Constraint_impl<"uaba", ".8b",  VPR64,  v8i8,
646                     0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
647 def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
648                     0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
649 def UABAvvv_4H :  NeonI_3VSame_Constraint_impl<"uaba", ".4h",  VPR64,  v4i16,
650                     0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
651 def UABAvvv_8H :  NeonI_3VSame_Constraint_impl<"uaba", ".8h",  VPR128, v8i16,
652                     0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
653 def UABAvvv_2S :  NeonI_3VSame_Constraint_impl<"uaba", ".2s",  VPR64,  v2i32,
654                     0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
655 def UABAvvv_4S :  NeonI_3VSame_Constraint_impl<"uaba", ".4s",  VPR128, v4i32,
656                     0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
657
658 // Vector Absolute Difference and Accumulate (Signed)
659 def SABAvvv_8B :  NeonI_3VSame_Constraint_impl<"saba", ".8b",  VPR64,  v8i8,
660                     0b0, 0b0, 0b00, 0b01111, Neon_saba>;
661 def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
662                     0b1, 0b0, 0b00, 0b01111, Neon_saba>;
663 def SABAvvv_4H :  NeonI_3VSame_Constraint_impl<"saba", ".4h",  VPR64,  v4i16,
664                     0b0, 0b0, 0b01, 0b01111, Neon_saba>;
665 def SABAvvv_8H :  NeonI_3VSame_Constraint_impl<"saba", ".8h",  VPR128, v8i16,
666                     0b1, 0b0, 0b01, 0b01111, Neon_saba>;
667 def SABAvvv_2S :  NeonI_3VSame_Constraint_impl<"saba", ".2s",  VPR64,  v2i32,
668                     0b0, 0b0, 0b10, 0b01111, Neon_saba>;
669 def SABAvvv_4S :  NeonI_3VSame_Constraint_impl<"saba", ".4s",  VPR128, v4i32,
670                     0b1, 0b0, 0b10, 0b01111, Neon_saba>;
671
672
673 // Vector Absolute Difference (Signed, Unsigned)
674 defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
675 defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
676
677 // Vector Absolute Difference (Floating Point)
678 defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
679                                     int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
680
681 // Vector Reciprocal Step (Floating Point)
682 defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
683                                        int_arm_neon_vrecps,
684                                        v2f32, v4f32, v2f64, 0>;
685
686 // Vector Reciprocal Square Root Step (Floating Point)
687 defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
688                                         int_arm_neon_vrsqrts,
689                                         v2f32, v4f32, v2f64, 0>;
690
691 // Vector Comparisons
692
693 def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
694                         (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
695 def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
696                          (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
697 def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
698                         (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
699 def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
700                         (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
701 def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
702                         (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
703
704 // NeonI_compare_aliases class: swaps register operands to implement
705 // comparison aliases, e.g., CMLE is an alias for CMGE with operands reversed.
706 class NeonI_compare_aliases<string asmop, string asmlane,
707                             Instruction inst, RegisterOperand VPRC>
708   : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
709                     ", $Rm" # asmlane,
710                   (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
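// For example, "cmle v0.8b, v1.8b, v2.8b" is accepted by the assembler and
// encoded as "cmge v0.8b, v2.8b, v1.8b".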
711
712 // Vector Comparisons (Integer)
713
714 // Vector Compare Mask Equal (Integer)
715 let isCommutable = 1 in {
716 defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
717 }
718
719 // Vector Compare Mask Higher or Same (Unsigned Integer)
720 defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
721
722 // Vector Compare Mask Greater Than or Equal (Integer)
723 defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
724
725 // Vector Compare Mask Higher (Unsigned Integer)
726 defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
727
728 // Vector Compare Mask Greater Than (Integer)
729 defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
730
731 // Vector Compare Mask Bitwise Test (Integer)
732 defm CMTSTvvv:  NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
733
734 // Vector Compare Mask Less or Same (Unsigned Integer)
735 // CMLS is an alias for CMHS with operands reversed.
736 def CMLSvvv_8B  : NeonI_compare_aliases<"cmls", ".8b",  CMHSvvv_8B,  VPR64>;
737 def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
738 def CMLSvvv_4H  : NeonI_compare_aliases<"cmls", ".4h",  CMHSvvv_4H,  VPR64>;
739 def CMLSvvv_8H  : NeonI_compare_aliases<"cmls", ".8h",  CMHSvvv_8H,  VPR128>;
740 def CMLSvvv_2S  : NeonI_compare_aliases<"cmls", ".2s",  CMHSvvv_2S,  VPR64>;
741 def CMLSvvv_4S  : NeonI_compare_aliases<"cmls", ".4s",  CMHSvvv_4S,  VPR128>;
742 def CMLSvvv_2D  : NeonI_compare_aliases<"cmls", ".2d",  CMHSvvv_2D,  VPR128>;
743
744 // Vector Compare Mask Less Than or Equal (Integer)
745 // CMLE is an alias for CMGE with operands reversed.
746 def CMLEvvv_8B  : NeonI_compare_aliases<"cmle", ".8b",  CMGEvvv_8B,  VPR64>;
747 def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
748 def CMLEvvv_4H  : NeonI_compare_aliases<"cmle", ".4h",  CMGEvvv_4H,  VPR64>;
749 def CMLEvvv_8H  : NeonI_compare_aliases<"cmle", ".8h",  CMGEvvv_8H,  VPR128>;
750 def CMLEvvv_2S  : NeonI_compare_aliases<"cmle", ".2s",  CMGEvvv_2S,  VPR64>;
751 def CMLEvvv_4S  : NeonI_compare_aliases<"cmle", ".4s",  CMGEvvv_4S,  VPR128>;
752 def CMLEvvv_2D  : NeonI_compare_aliases<"cmle", ".2d",  CMGEvvv_2D,  VPR128>;
753
754 // Vector Compare Mask Lower (Unsigned Integer)
755 // CMLO is an alias for CMHI with operands reversed.
756 def CMLOvvv_8B  : NeonI_compare_aliases<"cmlo", ".8b",  CMHIvvv_8B,  VPR64>;
757 def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
758 def CMLOvvv_4H  : NeonI_compare_aliases<"cmlo", ".4h",  CMHIvvv_4H,  VPR64>;
759 def CMLOvvv_8H  : NeonI_compare_aliases<"cmlo", ".8h",  CMHIvvv_8H,  VPR128>;
760 def CMLOvvv_2S  : NeonI_compare_aliases<"cmlo", ".2s",  CMHIvvv_2S,  VPR64>;
761 def CMLOvvv_4S  : NeonI_compare_aliases<"cmlo", ".4s",  CMHIvvv_4S,  VPR128>;
762 def CMLOvvv_2D  : NeonI_compare_aliases<"cmlo", ".2d",  CMHIvvv_2D,  VPR128>;
763
764 // Vector Compare Mask Less Than (Integer)
765 // CMLT is an alias for CMGT with operands reversed.
766 def CMLTvvv_8B  : NeonI_compare_aliases<"cmlt", ".8b",  CMGTvvv_8B,  VPR64>;
767 def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
768 def CMLTvvv_4H  : NeonI_compare_aliases<"cmlt", ".4h",  CMGTvvv_4H,  VPR64>;
769 def CMLTvvv_8H  : NeonI_compare_aliases<"cmlt", ".8h",  CMGTvvv_8H,  VPR128>;
770 def CMLTvvv_2S  : NeonI_compare_aliases<"cmlt", ".2s",  CMGTvvv_2S,  VPR64>;
771 def CMLTvvv_4S  : NeonI_compare_aliases<"cmlt", ".4s",  CMGTvvv_4S,  VPR128>;
772 def CMLTvvv_2D  : NeonI_compare_aliases<"cmlt", ".2d",  CMGTvvv_2D,  VPR128>;
773
774
775 def neon_uimm0_asmoperand : AsmOperandClass
776 {
777   let Name = "UImm0";
778   let PredicateMethod = "isUImm<0>";
779   let RenderMethod = "addImmOperands";
780 }
781
782 def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
783   let ParserMatchClass = neon_uimm0_asmoperand;
784   let PrintMethod = "printNeonUImm0Operand";
785
786 }
787
788 multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
789 {
790   def _8B :  NeonI_2VMisc<0b0, u, 0b00, opcode,
791              (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
792              asmop # "\t$Rd.8b, $Rn.8b, $Imm",
793              [(set (v8i8 VPR64:$Rd),
794                 (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
795              NoItinerary>,
796              Sched<[WriteFPALU, ReadFPALU]>;
797
798   def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
799              (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
800              asmop # "\t$Rd.16b, $Rn.16b, $Imm",
801              [(set (v16i8 VPR128:$Rd),
802                 (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
803              NoItinerary>,
804              Sched<[WriteFPALU, ReadFPALU]>;
805
806   def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
807             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
808             asmop # "\t$Rd.4h, $Rn.4h, $Imm",
809             [(set (v4i16 VPR64:$Rd),
810                (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
811             NoItinerary>,
812             Sched<[WriteFPALU, ReadFPALU]>;
813
814   def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
815             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
816             asmop # "\t$Rd.8h, $Rn.8h, $Imm",
817             [(set (v8i16 VPR128:$Rd),
818                (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
819             NoItinerary>,
820             Sched<[WriteFPALU, ReadFPALU]>;
821
822   def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
823             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
824             asmop # "\t$Rd.2s, $Rn.2s, $Imm",
825             [(set (v2i32 VPR64:$Rd),
826                (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
827             NoItinerary>,
828             Sched<[WriteFPALU, ReadFPALU]>;
829
830   def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
831             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
832             asmop # "\t$Rd.4s, $Rn.4s, $Imm",
833             [(set (v4i32 VPR128:$Rd),
834                (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
835             NoItinerary>,
836             Sched<[WriteFPALU, ReadFPALU]>;
837
838   def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
839             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
840             asmop # "\t$Rd.2d, $Rn.2d, $Imm",
841             [(set (v2i64 VPR128:$Rd),
842                (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
843             NoItinerary>,
844             Sched<[WriteFPALU, ReadFPALU]>;
845 }
846
847 // Vector Compare Mask Equal to Zero (Integer)
848 defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
849
850 // Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
851 defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
852
853 // Vector Compare Mask Greater Than Zero (Signed Integer)
854 defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
855
856 // Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
857 defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
858
859 // Vector Compare Mask Less Than Zero (Signed Integer)
860 defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
861
862 // Vector Comparisons (Floating Point)
863
864 // Vector Compare Mask Equal (Floating Point)
865 let isCommutable = 1 in {
866 defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
867                                       v2i32, v4i32, v2i64, 0>;
868 }
869
870 // Vector Compare Mask Greater Than Or Equal (Floating Point)
871 defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
872                                       v2i32, v4i32, v2i64, 0>;
873
874 // Vector Compare Mask Greater Than (Floating Point)
875 defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
876                                       v2i32, v4i32, v2i64, 0>;
877
878 // Vector Compare Mask Less Than Or Equal (Floating Point)
879 // FCMLE is an alias for FCMGE with operands reversed.
880 def FCMLEvvv_2S  : NeonI_compare_aliases<"fcmle", ".2s",  FCMGEvvv_2S,  VPR64>;
881 def FCMLEvvv_4S  : NeonI_compare_aliases<"fcmle", ".4s",  FCMGEvvv_4S,  VPR128>;
882 def FCMLEvvv_2D  : NeonI_compare_aliases<"fcmle", ".2d",  FCMGEvvv_2D,  VPR128>;
883
884 // Vector Compare Mask Less Than (Floating Point)
885 // FCMLT is an alias for FCMGT with operands reversed.
886 def FCMLTvvv_2S  : NeonI_compare_aliases<"fcmlt", ".2s",  FCMGTvvv_2S,  VPR64>;
887 def FCMLTvvv_4S  : NeonI_compare_aliases<"fcmlt", ".4s",  FCMGTvvv_4S,  VPR128>;
888 def FCMLTvvv_2D  : NeonI_compare_aliases<"fcmlt", ".2d",  FCMGTvvv_2D,  VPR128>;
889
890 def fpzero_izero_asmoperand : AsmOperandClass {
891   let Name = "FPZeroIZero";
892   let ParserMethod = "ParseFPImm0AndImm0Operand";
893   let DiagnosticType = "FPZero";
894 }
895
896 def fpzz32 : Operand<f32>,
897              ComplexPattern<f32, 1, "SelectFPZeroOperand", [fpimm]> {
898   let ParserMatchClass = fpzero_izero_asmoperand;
899   let PrintMethod = "printFPZeroOperand";
900   let DecoderMethod = "DecodeFPZeroOperand";
901 }
902
903 multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
904                               string asmop, CondCode CC>
905 {
906   def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
907             (outs VPR64:$Rd), (ins VPR64:$Rn, fpzz32:$FPImm),
908             asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
909             [(set (v2i32 VPR64:$Rd),
910                (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpzz32:$FPImm), CC)))],
911             NoItinerary>,
912             Sched<[WriteFPALU, ReadFPALU]>;
913
914   def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
915             (outs VPR128:$Rd), (ins VPR128:$Rn, fpzz32:$FPImm),
916             asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
917             [(set (v4i32 VPR128:$Rd),
918                (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpzz32:$FPImm), CC)))],
919             NoItinerary>,
920             Sched<[WriteFPALU, ReadFPALU]>;
921
922   def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
923             (outs VPR128:$Rd), (ins VPR128:$Rn, fpzz32:$FPImm),
924             asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
925             [(set (v2i64 VPR128:$Rd),
926                (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpzz32:$FPImm), CC)))],
927             NoItinerary>,
928             Sched<[WriteFPALU, ReadFPALU]>;
929 }
930
931 // Vector Compare Mask Equal to Zero (Floating Point)
932 defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
933
934 // Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
935 defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
936
937 // Vector Compare Mask Greater Than Zero (Floating Point)
938 defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
939
940 // Vector Compare Mask Less Than or Equal To Zero (Floating Point)
941 defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
942
943 // Vector Compare Mask Less Than Zero (Floating Point)
944 defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
945
946 // Vector Absolute Comparisons (Floating Point)
947
948 // Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
949 defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
950                                       int_arm_neon_vacge,
951                                       v2i32, v4i32, v2i64, 0>;
952
953 // Vector Absolute Compare Mask Greater Than (Floating Point)
954 defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
955                                       int_arm_neon_vacgt,
956                                       v2i32, v4i32, v2i64, 0>;
957
958 // Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
959 // FACLE is an alias for FACGE with operands reversed.
960 def FACLEvvv_2S  : NeonI_compare_aliases<"facle", ".2s",  FACGEvvv_2S,  VPR64>;
961 def FACLEvvv_4S  : NeonI_compare_aliases<"facle", ".4s",  FACGEvvv_4S,  VPR128>;
962 def FACLEvvv_2D  : NeonI_compare_aliases<"facle", ".2d",  FACGEvvv_2D,  VPR128>;
963
964 // Vector Absolute Compare Mask Less Than (Floating Point)
965 // FACLT is an alias for FACGT with operands reversed.
966 def FACLTvvv_2S  : NeonI_compare_aliases<"faclt", ".2s",  FACGTvvv_2S,  VPR64>;
967 def FACLTvvv_4S  : NeonI_compare_aliases<"faclt", ".4s",  FACGTvvv_4S,  VPR128>;
968 def FACLTvvv_2D  : NeonI_compare_aliases<"faclt", ".2d",  FACGTvvv_2D,  VPR128>;
969
970 // Vector halving add (Signed and Unsigned Integer)
971 defm SHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
972                                         int_arm_neon_vhadds, 1>;
973 defm UHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
974                                         int_arm_neon_vhaddu, 1>;
975
976 // Vector halving sub (Signed and Unsigned Integer)
977 defm SHSUBvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
978                                         int_arm_neon_vhsubs, 0>;
979 defm UHSUBvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
980                                         int_arm_neon_vhsubu, 0>;
981
982 // Vector rounding halving add (Signed and Unsigned Integer)
983 defm SRHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
984                                          int_arm_neon_vrhadds, 1>;
985 defm URHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
986                                          int_arm_neon_vrhaddu, 1>;
987
988 // Vector Saturating add (Signed and Unsigned Integer)
989 defm SQADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
990                    int_arm_neon_vqadds, 1>;
991 defm UQADDvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
992                    int_arm_neon_vqaddu, 1>;
993
994 // Vector Saturating sub (Signed and Unsigned Integer)
995 defm SQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
996                    int_arm_neon_vqsubs, 1>;
997 defm UQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
998                    int_arm_neon_vqsubu, 1>;
999
1000 // Vector Shift Left (Signed and Unsigned Integer)
1001 defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
1002                  int_arm_neon_vshifts, 1>;
1003 defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
1004                  int_arm_neon_vshiftu, 1>;
1005
1006 // Vector Saturating Shift Left (Signed and Unsigned Integer)
1007 defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
1008                   int_arm_neon_vqshifts, 1>;
1009 defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
1010                   int_arm_neon_vqshiftu, 1>;
1011
1012 // Vector Rounding Shift Left (Signed and Unsigned Integer)
1013 defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
1014                   int_arm_neon_vrshifts, 1>;
1015 defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
1016                   int_arm_neon_vrshiftu, 1>;
1017
1018 // Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
1019 defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
1020                    int_arm_neon_vqrshifts, 1>;
1021 defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
1022                    int_arm_neon_vqrshiftu, 1>;
1023
1024 // Vector Maximum (Signed and Unsigned Integer)
1025 defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
1026 defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
1027
1028 // Vector Minimum (Signed and Unsigned Integer)
1029 defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
1030 defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
1031
1032 // Vector Maximum (Floating Point)
1033 defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
1034                                      int_arm_neon_vmaxs,
1035                                      v2f32, v4f32, v2f64, 1>;
1036
1037 // Vector Minimum (Floating Point)
1038 defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
1039                                      int_arm_neon_vmins,
1040                                      v2f32, v4f32, v2f64, 1>;
1041
1042 // Vector maxNum (Floating Point) - prefer a number over a quiet NaN
1043 defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
1044                                        int_aarch64_neon_vmaxnm,
1045                                        v2f32, v4f32, v2f64, 1>;
1046
1047 // Vector minNum (Floating Point) - prefer a number over a quiet NaN
1048 defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
1049                                        int_aarch64_neon_vminnm,
1050                                        v2f32, v4f32, v2f64, 1>;
1051
1052 // Vector Maximum Pairwise (Signed and Unsigned Integer)
1053 defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
1054 defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
1055
1056 // Vector Minimum Pairwise (Signed and Unsigned Integer)
1057 defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
1058 defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
1059
1060 // Vector Maximum Pairwise (Floating Point)
1061 defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
1062                                      int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
1063
1064 // Vector Minimum Pairwise (Floating Point)
1065 defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
1066                                      int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
1067
1068 // Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
1069 defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
1070                                        int_aarch64_neon_vpmaxnm,
1071                                        v2f32, v4f32, v2f64, 1>;
1072
1073 // Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
1074 defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
1075                                        int_aarch64_neon_vpminnm,
1076                                        v2f32, v4f32, v2f64, 1>;
1077
1078 // Vector Addition Pairwise (Integer)
1079 defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
1080
1081 // Vector Addition Pairwise (Floating Point)
1082 defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
1083                                        int_arm_neon_vpadd,
1084                                        v2f32, v4f32, v2f64, 1>;
1085
1086 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
1087 // Vector Saturating Doubling Multiply High
1088 defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
1089                     int_arm_neon_vqdmulh, 1>;
1090
1091 // Vector Saturating Rounding Doubling Multiply High
1092 defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
1093                      int_arm_neon_vqrdmulh, 1>;
1094
1095 // Vector Multiply Extended (Floating Point)
1096 defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
1097                                       int_aarch64_neon_vmulx,
1098                                       v2f32, v4f32, v2f64, 1>;
1099 }
1100
1101 // Patterns to match llvm.aarch64.* intrinsics for
1102 // ADDP, SMINP, UMINP, SMAXP and UMAXP that have an i32 output
1103 class Neon_VectorPair_v2i32_pattern<SDPatternOperator opnode, Instruction INST>
1104   : Pat<(v1i32 (opnode (v2i32 VPR64:$Rn))),
1105         (EXTRACT_SUBREG
1106              (v2i32 (INST (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rn))),
1107              sub_32)>;
1108
1109 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_sminv, SMINPvvv_2S>;
1110 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_uminv, UMINPvvv_2S>;
1111 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_smaxv, SMAXPvvv_2S>;
1112 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_umaxv, UMAXPvvv_2S>;
1113 def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_vaddv, ADDP_2S>;
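
// For illustration: with the pattern class above, a call such as
//   (v1i32 (int_aarch64_neon_sminv (v2i32 VPR64:$Rn)))
// is selected as
//   (EXTRACT_SUBREG (SMINPvvv_2S $Rn, $Rn), sub_32)
// i.e. "sminp v0.2s, v1.2s, v1.2s" with the scalar result read from the low
// 32-bit sub-register (sub_32) of the 64-bit destination.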
1114
1115 // Vector Immediate Instructions
1116
1117 multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
1118 {
1119   def _asmoperand : AsmOperandClass
1120     {
1121       let Name = "NeonMovImmShift" # PREFIX;
1122       let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
1123       let PredicateMethod = "isNeonMovImmShift" # PREFIX;
1124     }
1125 }
1126
1127 // Definition of vector immediate shift operands
1128
1129 // The selectable use-cases extract the shift operation
1130 // information from the OpCmode field encoded in the immediate.
1131 def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
1132   uint64_t OpCmode = N->getZExtValue();
1133   unsigned ShiftImm;
1134   unsigned ShiftOnesIn;
1135   unsigned HasShift =
1136     A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1137   if (!HasShift) return SDValue();
1138   return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
1139 }]>;
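
// For reference, the cmode encoding assumed by the predicates below (the
// standard AArch64 modified-immediate scheme): cmode<3> = 0 selects a 32-bit
// immediate with LSL #(8 * cmode<2:1>), i.e. LSL #0/#8/#16/#24;
// cmode<3:2> = 0b10 selects a 16-bit immediate with LSL #(8 * cmode<1>); and
// cmode<3:1> = 0b110 selects MSL #8 (cmode<0> = 0) or MSL #16 (cmode<0> = 1),
// the "shift ones in" form.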
1140
1141 // Vector immediate shift operands which accept LSL and MSL
1142 // shift operators, with the shift amount in the range 0, 8, 16, 24 (LSL),
1143 // 0, 8 (LSLH) or 8, 16 (MSL).
1144 defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
1145 defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
1146 // LSLH restricts the shift amount to 0 or 8 out of 0, 8, 16, 24
1147 defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
1148
1149 multiclass neon_mov_imm_shift_operands<string PREFIX,
1150                                        string HALF, string ISHALF, code pred>
1151 {
1152    def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
1153     {
1154       let PrintMethod =
1155         "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1156       let DecoderMethod =
1157         "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1158       let ParserMatchClass =
1159         !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
1160     }
1161 }
1162
1163 defm neon_mov_imm_LSL  : neon_mov_imm_shift_operands<"LSL", "", "false", [{
1164   unsigned ShiftImm;
1165   unsigned ShiftOnesIn;
1166   unsigned HasShift =
1167     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1168   return (HasShift && !ShiftOnesIn);
1169 }]>;
1170
1171 defm neon_mov_imm_MSL  : neon_mov_imm_shift_operands<"MSL", "", "false", [{
1172   unsigned ShiftImm;
1173   unsigned ShiftOnesIn;
1174   unsigned HasShift =
1175     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1176   return (HasShift && ShiftOnesIn);
1177 }]>;
1178
1179 defm neon_mov_imm_LSLH  : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
1180   unsigned ShiftImm;
1181   unsigned ShiftOnesIn;
1182   unsigned HasShift =
1183     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1184   return (HasShift && !ShiftOnesIn);
1185 }]>;
1186
1187 def neon_uimm1_asmoperand : AsmOperandClass
1188 {
1189   let Name = "UImm1";
1190   let PredicateMethod = "isUImm<1>";
1191   let RenderMethod = "addImmOperands";
1192 }
1193
1194 def neon_uimm2_asmoperand : AsmOperandClass
1195 {
1196   let Name = "UImm2";
1197   let PredicateMethod = "isUImm<2>";
1198   let RenderMethod = "addImmOperands";
1199 }
1200
1201 def neon_uimm8_asmoperand : AsmOperandClass
1202 {
1203   let Name = "UImm8";
1204   let PredicateMethod = "isUImm<8>";
1205   let RenderMethod = "addImmOperands";
1206 }
1207
1208 def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1209   let ParserMatchClass = neon_uimm8_asmoperand;
1210   let PrintMethod = "printUImmHexOperand";
1211 }
1212
1213 def neon_uimm64_mask_asmoperand : AsmOperandClass
1214 {
1215   let Name = "NeonUImm64Mask";
1216   let PredicateMethod = "isNeonUImm64Mask";
1217   let RenderMethod = "addNeonUImm64MaskOperands";
1218 }
1219
1220 // An MCOperand for a 64-bit bytemask, in which each byte is either
1221 // 0x00 or 0xff, is encoded as an unsigned 8-bit value.
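// For example, the 64-bit mask 0xff00ff00ff00ff00, where byte i is 0xff
// exactly when bit i of the 8-bit immediate is set, is represented here by
// the value 0b10101010 (0xaa).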
1222 def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1223   let ParserMatchClass = neon_uimm64_mask_asmoperand;
1224   let PrintMethod = "printNeonUImm64MaskOperand";
1225 }
1226
1227 multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
1228                                    SDPatternOperator opnode>
1229 {
1230     // shift zeros, per word
1231     def _2S  : NeonI_1VModImm<0b0, op,
1232                               (outs VPR64:$Rd),
1233                               (ins neon_uimm8:$Imm,
1234                                 neon_mov_imm_LSL_operand:$Simm),
1235                               !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1236                               [(set (v2i32 VPR64:$Rd),
1237                                  (v2i32 (opnode (timm:$Imm),
1238                                    (neon_mov_imm_LSL_operand:$Simm))))],
1239                               NoItinerary>,
1240                Sched<[WriteFPALU]> {
1241        bits<2> Simm;
1242        let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1243      }
1244
1245     def _4S  : NeonI_1VModImm<0b1, op,
1246                               (outs VPR128:$Rd),
1247                               (ins neon_uimm8:$Imm,
1248                                 neon_mov_imm_LSL_operand:$Simm),
1249                               !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1250                               [(set (v4i32 VPR128:$Rd),
1251                                  (v4i32 (opnode (timm:$Imm),
1252                                    (neon_mov_imm_LSL_operand:$Simm))))],
1253                               NoItinerary>,
1254                Sched<[WriteFPALU]> {
1255       bits<2> Simm;
1256       let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1257     }
1258
1259     // shift zeros, per halfword
1260     def _4H  : NeonI_1VModImm<0b0, op,
1261                               (outs VPR64:$Rd),
1262                               (ins neon_uimm8:$Imm,
1263                                 neon_mov_imm_LSLH_operand:$Simm),
1264                               !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
1265                               [(set (v4i16 VPR64:$Rd),
1266                                  (v4i16 (opnode (timm:$Imm),
1267                                    (neon_mov_imm_LSLH_operand:$Simm))))],
1268                               NoItinerary>,
1269                Sched<[WriteFPALU]> {
1270       bit  Simm;
1271       let cmode = {0b1, 0b0, Simm, 0b0};
1272     }
1273
1274     def _8H  : NeonI_1VModImm<0b1, op,
1275                               (outs VPR128:$Rd),
1276                               (ins neon_uimm8:$Imm,
1277                                 neon_mov_imm_LSLH_operand:$Simm),
1278                               !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
1279                               [(set (v8i16 VPR128:$Rd),
1280                                  (v8i16 (opnode (timm:$Imm),
1281                                    (neon_mov_imm_LSLH_operand:$Simm))))],
1282                               NoItinerary>,
1283                Sched<[WriteFPALU]> {
1284       bit Simm;
1285       let cmode = {0b1, 0b0, Simm, 0b0};
1286      }
1287 }
1288
1289 multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
1290                                                    SDPatternOperator opnode,
1291                                                    SDPatternOperator neonopnode>
1292 {
1293   let Constraints = "$src = $Rd" in {
1294     // shift zeros, per word
1295     def _2S  : NeonI_1VModImm<0b0, op,
1296                  (outs VPR64:$Rd),
1297                  (ins VPR64:$src, neon_uimm8:$Imm,
1298                    neon_mov_imm_LSL_operand:$Simm),
1299                  !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1300                  [(set (v2i32 VPR64:$Rd),
1301                     (v2i32 (opnode (v2i32 VPR64:$src),
1302                       (v2i32 (neonopnode timm:$Imm,
1303                         neon_mov_imm_LSL_operand:$Simm)))))],
1304                  NoItinerary>,
1305                Sched<[WriteFPALU, ReadFPALU]> {
1306       bits<2> Simm;
1307       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1308     }
1309
1310     def _4S  : NeonI_1VModImm<0b1, op,
1311                  (outs VPR128:$Rd),
1312                  (ins VPR128:$src, neon_uimm8:$Imm,
1313                    neon_mov_imm_LSL_operand:$Simm),
1314                  !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1315                  [(set (v4i32 VPR128:$Rd),
1316                     (v4i32 (opnode (v4i32 VPR128:$src),
1317                       (v4i32 (neonopnode timm:$Imm,
1318                         neon_mov_imm_LSL_operand:$Simm)))))],
1319                  NoItinerary>,
1320                Sched<[WriteFPALU, ReadFPALU]> {
1321       bits<2> Simm;
1322       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1323     }
1324
1325     // shift zeros, per halfword
1326     def _4H  : NeonI_1VModImm<0b0, op,
1327                  (outs VPR64:$Rd),
1328                  (ins VPR64:$src, neon_uimm8:$Imm,
1329                    neon_mov_imm_LSLH_operand:$Simm),
1330                  !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
1331                  [(set (v4i16 VPR64:$Rd),
1332                     (v4i16 (opnode (v4i16 VPR64:$src),
1333                        (v4i16 (neonopnode timm:$Imm,
1334                           neon_mov_imm_LSL_operand:$Simm)))))],
1335                  NoItinerary>,
1336                Sched<[WriteFPALU, ReadFPALU]> {
1337       bit  Simm;
1338       let cmode = {0b1, 0b0, Simm, 0b1};
1339     }
1340
1341     def _8H  : NeonI_1VModImm<0b1, op,
1342                  (outs VPR128:$Rd),
1343                  (ins VPR128:$src, neon_uimm8:$Imm,
1344                    neon_mov_imm_LSLH_operand:$Simm),
1345                  !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
1346                  [(set (v8i16 VPR128:$Rd),
1347                     (v8i16 (opnode (v8i16 VPR128:$src),
1348                       (v8i16 (neonopnode timm:$Imm,
1349                         neon_mov_imm_LSL_operand:$Simm)))))],
1350                  NoItinerary>,
1351                Sched<[WriteFPALU, ReadFPALU]> {
1352       bit Simm;
1353       let cmode = {0b1, 0b0, Simm, 0b1};
1354     }
1355   }
1356 }
1357
1358 multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
1359                                    SDPatternOperator opnode>
1360 {
1361     // shift ones, per word
1362     def _2S  : NeonI_1VModImm<0b0, op,
1363                              (outs VPR64:$Rd),
1364                              (ins neon_uimm8:$Imm,
1365                                neon_mov_imm_MSL_operand:$Simm),
1366                              !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1367                               [(set (v2i32 VPR64:$Rd),
1368                                  (v2i32 (opnode (timm:$Imm),
1369                                    (neon_mov_imm_MSL_operand:$Simm))))],
1370                              NoItinerary>,
1371                Sched<[WriteFPALU]> {
1372        bit Simm;
1373        let cmode = {0b1, 0b1, 0b0, Simm};
1374      }
1375
1376    def _4S  : NeonI_1VModImm<0b1, op,
1377                               (outs VPR128:$Rd),
1378                               (ins neon_uimm8:$Imm,
1379                                 neon_mov_imm_MSL_operand:$Simm),
1380                               !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1381                               [(set (v4i32 VPR128:$Rd),
1382                                  (v4i32 (opnode (timm:$Imm),
1383                                    (neon_mov_imm_MSL_operand:$Simm))))],
1384                               NoItinerary>,
1385               Sched<[WriteFPALU]> {
1386      bit Simm;
1387      let cmode = {0b1, 0b1, 0b0, Simm};
1388    }
1389 }
1390
1391 // Vector Move Immediate Shifted
1392 let isReMaterializable = 1 in {
1393 defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
1394 }
1395
1396 // Vector Move Inverted Immediate Shifted
1397 let isReMaterializable = 1 in {
1398 defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
1399 }
1400
1401 // Vector Bitwise Bit Clear (AND NOT) - immediate
1402 let isReMaterializable = 1 in {
1403 defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
1404                                                          and, Neon_mvni>;
1405 }
1406
1407 // Vector Bitwise OR - immediate
1408
1409 let isReMaterializable = 1 in {
1410 defm ORRvi_lsl   : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
1411                                                            or, Neon_movi>;
1412 }
1413
1414 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1415 // LowerBUILD_VECTOR favors lowering MOVI over MVNI.
1416 // BIC immediate instruction selection requires additional patterns to
1417 // transform Neon_movi operands into BIC immediate operands.
1418
1419 def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
1420   uint64_t OpCmode = N->getZExtValue();
1421   unsigned ShiftImm;
1422   unsigned ShiftOnesIn;
1423   (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1424   // LSLH restricts the shift amount to 0 or 8, which are encoded as 0 and 1.
1425   // Transform encoded shift amount 0 to 1 and 1 to 0.
1426   return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
1427 }]>;
1428
1429 def neon_mov_imm_LSLH_transform_operand
1430   : ImmLeaf<i32, [{
1431     unsigned ShiftImm;
1432     unsigned ShiftOnesIn;
1433     unsigned HasShift =
1434       A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1435     return (HasShift && !ShiftOnesIn); }],
1436   neon_mov_imm_LSLH_transform_XFORM>;
1437
1438 // Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0xff, LSL 8)
1439 // Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0xff)
1440 def : Pat<(v4i16 (and VPR64:$src,
1441             (v4i16 (Neon_movi 255,
1442               neon_mov_imm_LSLH_transform_operand:$Simm)))),
1443           (BICvi_lsl_4H VPR64:$src, 255,
1444             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1445
1446 // Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0xff, LSL 8)
1447 // Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0xff)
1448 def : Pat<(v8i16 (and VPR128:$src,
1449             (v8i16 (Neon_movi 255,
1450               neon_mov_imm_LSLH_transform_operand:$Simm)))),
1451           (BICvi_lsl_8H VPR128:$src, 255,
1452             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1453
1454 def : Pat<(v8i8 (and VPR64:$src,
1455                   (bitconvert(v4i16 (Neon_movi 255,
1456                     neon_mov_imm_LSLH_transform_operand:$Simm))))),
1457           (BICvi_lsl_4H VPR64:$src, 255,
1458             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1459 def : Pat<(v2i32 (and VPR64:$src,
1460                  (bitconvert(v4i16 (Neon_movi 255,
1461                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
1462           (BICvi_lsl_4H VPR64:$src, 255,
1463             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1464 def : Pat<(v1i64 (and VPR64:$src,
1465                 (bitconvert(v4i16 (Neon_movi 255,
1466                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
1467         (BICvi_lsl_4H VPR64:$src, 255,
1468           neon_mov_imm_LSLH_transform_operand:$Simm)>;
1469
1470 def : Pat<(v16i8 (and VPR128:$src,
1471                  (bitconvert(v8i16 (Neon_movi 255,
1472                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
1473         (BICvi_lsl_8H VPR128:$src, 255,
1474           neon_mov_imm_LSLH_transform_operand:$Simm)>;
1475 def : Pat<(v4i32 (and VPR128:$src,
1476                  (bitconvert(v8i16 (Neon_movi 255,
1477                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
1478         (BICvi_lsl_8H VPR128:$src, 255,
1479           neon_mov_imm_LSLH_transform_operand:$Simm)>;
1480 def : Pat<(v2i64 (and VPR128:$src,
1481                  (bitconvert(v8i16 (Neon_movi 255,
1482                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
1483         (BICvi_lsl_8H VPR128:$src, 255,
1484           neon_mov_imm_LSLH_transform_operand:$Simm)>;
1485
1486 multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
1487                                    SDPatternOperator neonopnode,
1488                                    Instruction INST4H,
1489                                    Instruction INST8H,
1490                                    Instruction INST2S,
1491                                    Instruction INST4S> {
1492   def : Pat<(v8i8 (opnode VPR64:$src,
1493                     (bitconvert(v4i16 (neonopnode timm:$Imm,
1494                       neon_mov_imm_LSLH_operand:$Simm))))),
1495             (INST4H VPR64:$src, neon_uimm8:$Imm,
1496               neon_mov_imm_LSLH_operand:$Simm)>;
1497   def : Pat<(v2i32 (opnode VPR64:$src,
1498                    (bitconvert(v4i16 (neonopnode timm:$Imm,
1499                      neon_mov_imm_LSLH_operand:$Simm))))),
1500             (INST4H VPR64:$src, neon_uimm8:$Imm,
1501               neon_mov_imm_LSLH_operand:$Simm)>;
1502   def : Pat<(v1i64 (opnode VPR64:$src,
1503                   (bitconvert(v4i16 (neonopnode timm:$Imm,
1504                     neon_mov_imm_LSLH_operand:$Simm))))),
1505           (INST4H VPR64:$src, neon_uimm8:$Imm,
1506             neon_mov_imm_LSLH_operand:$Simm)>;
1507
1508   def : Pat<(v16i8 (opnode VPR128:$src,
1509                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1510                      neon_mov_imm_LSLH_operand:$Simm))))),
1511           (INST8H VPR128:$src, neon_uimm8:$Imm,
1512             neon_mov_imm_LSLH_operand:$Simm)>;
1513   def : Pat<(v4i32 (opnode VPR128:$src,
1514                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1515                      neon_mov_imm_LSLH_operand:$Simm))))),
1516           (INST8H VPR128:$src, neon_uimm8:$Imm,
1517             neon_mov_imm_LSLH_operand:$Simm)>;
1518   def : Pat<(v2i64 (opnode VPR128:$src,
1519                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1520                      neon_mov_imm_LSLH_operand:$Simm))))),
1521           (INST8H VPR128:$src, neon_uimm8:$Imm,
1522             neon_mov_imm_LSLH_operand:$Simm)>;
1523
1524   def : Pat<(v8i8 (opnode VPR64:$src,
1525                     (bitconvert(v2i32 (neonopnode timm:$Imm,
1526                       neon_mov_imm_LSLH_operand:$Simm))))),
1527             (INST2S VPR64:$src, neon_uimm8:$Imm,
1528               neon_mov_imm_LSLH_operand:$Simm)>;
1529   def : Pat<(v4i16 (opnode VPR64:$src,
1530                    (bitconvert(v2i32 (neonopnode timm:$Imm,
1531                      neon_mov_imm_LSLH_operand:$Simm))))),
1532             (INST2S VPR64:$src, neon_uimm8:$Imm,
1533               neon_mov_imm_LSLH_operand:$Simm)>;
1534   def : Pat<(v1i64 (opnode VPR64:$src,
1535                   (bitconvert(v2i32 (neonopnode timm:$Imm,
1536                     neon_mov_imm_LSLH_operand:$Simm))))),
1537           (INST2S VPR64:$src, neon_uimm8:$Imm,
1538             neon_mov_imm_LSLH_operand:$Simm)>;
1539
1540   def : Pat<(v16i8 (opnode VPR128:$src,
1541                    (bitconvert(v4i32 (neonopnode timm:$Imm,
1542                      neon_mov_imm_LSLH_operand:$Simm))))),
1543           (INST4S VPR128:$src, neon_uimm8:$Imm,
1544             neon_mov_imm_LSLH_operand:$Simm)>;
1545   def : Pat<(v8i16 (opnode VPR128:$src,
1546                    (bitconvert(v4i32 (neonopnode timm:$Imm,
1547                      neon_mov_imm_LSLH_operand:$Simm))))),
1548           (INST4S VPR128:$src, neon_uimm8:$Imm,
1549             neon_mov_imm_LSLH_operand:$Simm)>;
1550   def : Pat<(v2i64 (opnode VPR128:$src,
1551                    (bitconvert(v4i32 (neonopnode timm:$Imm,
1552                      neon_mov_imm_LSLH_operand:$Simm))))),
1553           (INST4S VPR128:$src, neon_uimm8:$Imm,
1554             neon_mov_imm_LSLH_operand:$Simm)>;
1555 }
1556
1557 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1558 defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H,
1559                                BICvi_lsl_2S, BICvi_lsl_4S>;
1560
1561 // Additional patterns for Vector Bitwise OR - immediate
1562 defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H,
1563                                ORRvi_lsl_2S, ORRvi_lsl_4S>;
1564
1565
1566 // Vector Move Immediate Masked
1567 let isReMaterializable = 1 in {
1568 defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
1569 }
1570
1571 // Vector Move Inverted Immediate Masked
1572 let isReMaterializable = 1 in {
1573 defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
1574 }
1575
1576 class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
1577                                 Instruction inst, RegisterOperand VPRC>
1578   : NeonInstAlias<!strconcat(asmop, "\t$Rd," # asmlane # ", $Imm"),
1579                         (inst VPRC:$Rd, neon_uimm8:$Imm,  0), 0b0>;
1580
1581 // Aliases for Vector Move Immediate Shifted
1582 def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
1583 def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
1584 def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
1585 def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
1586
1587 // Aliases for Vector Move Inverted Immediate Shifted
1588 def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
1589 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
1590 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
1591 def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
1592
1593 // Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
1594 def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
1595 def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
1596 def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
1597 def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
1598
1599 // Aliases for Vector Bitwise OR - immediate
1600 def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
1601 def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
1602 def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
1603 def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
1604
1605 //  Vector Move Immediate - per byte
1606 let isReMaterializable = 1 in {
1607 def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
1608                                (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
1609                                "movi\t$Rd.8b, $Imm",
1610                                [(set (v8i8 VPR64:$Rd),
1611                                   (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1612                                 NoItinerary>,
1613                 Sched<[WriteFPALU]> {
1614   let cmode = 0b1110;
1615 }
1616
1617 def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
1618                                 (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
1619                                 "movi\t$Rd.16b, $Imm",
1620                                 [(set (v16i8 VPR128:$Rd),
1621                                    (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1622                                  NoItinerary>,
1623                 Sched<[WriteFPALU]> {
1624   let cmode = 0b1110;
1625 }
1626 }
1627
1628 // Vector Move Immediate - bytemask, per double word
1629 let isReMaterializable = 1 in {
1630 def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
1631                                (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
1632                                "movi\t$Rd.2d, $Imm",
1633                                [(set (v2i64 VPR128:$Rd),
1634                                   (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1635                                NoItinerary>,
1636                 Sched<[WriteFPALU]> {
1637   let cmode = 0b1110;
1638 }
1639 }
1640
1641 // Vector Move Immediate - bytemask, one doubleword
1642
1643 let isReMaterializable = 1 in {
1644 def MOVIdi : NeonI_1VModImm<0b0, 0b1,
1645                            (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
1646                            "movi\t$Rd, $Imm",
1647                            [(set (v1i64 FPR64:$Rd),
1648                              (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1649                            NoItinerary>,
1650              Sched<[WriteFPALU]> {
1651   let cmode = 0b1110;
1652 }
1653 }
1654
1655 // Vector Floating Point Move Immediate
1656
1657 class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
1658                       Operand immOpType, bit q, bit op>
1659   : NeonI_1VModImm<q, op,
1660                    (outs VPRC:$Rd), (ins immOpType:$Imm),
1661                    "fmov\t$Rd" # asmlane # ", $Imm",
1662                    [(set (OpTy VPRC:$Rd),
1663                       (OpTy (Neon_fmovi (timm:$Imm))))],
1664                    NoItinerary>,
1665     Sched<[WriteFPALU]> {
1666      let cmode = 0b1111;
1667    }
1668
1669 let isReMaterializable = 1 in {
1670 def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64,  v2f32, fmov32_operand, 0b0, 0b0>;
1671 def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
1672 def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
1673 }
1674
1675 // Vector Shift (Immediate)
1676
1677 // Shift Right/Left Immediate - The immh:immb field of these shifts is encoded
1678 // as follows:
1679 //
1680 //    Offset    Encoding
1681 //     8        immh:immb<6:3> = '0001xxx', <imm> is encoded in immh:immb<2:0>
1682 //     16       immh:immb<6:4> = '001xxxx', <imm> is encoded in immh:immb<3:0>
1683 //     32       immh:immb<6:5> = '01xxxxx', <imm> is encoded in immh:immb<4:0>
1684 //     64       immh:immb<6>   = '1xxxxxx', <imm> is encoded in immh:immb<5:0>
1685 //
1686 // The shift right immediate amount, in the range 1 to element bits, is computed
1687 // as 2 * Offset - UInt(immh:immb).  The shift left immediate amount, in the range
1688 // 0 to element bits - 1, is computed as UInt(immh:immb) - Offset.
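// Worked example (illustrative): for a right shift on halfword elements such
// as "sshr v0.4h, v1.4h, #3", Offset is 16, so immh:immb = 2 * 16 - 3 = 29
// (0b0011101); for the left shift "shl v0.4h, v1.4h, #3", immh:immb is
// 16 + 3 = 19 (0b0010011).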
1689
1690 class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
1691   let Name = "ShrImm" # OFFSET;
1692   let RenderMethod = "addImmOperands";
1693   let DiagnosticType = "ShrImm" # OFFSET;
1694 }
1695
1696 class shr_imm<string OFFSET> : Operand<i32> {
1697   let EncoderMethod = "getShiftRightImm" # OFFSET;
1698   let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
1699   let ParserMatchClass =
1700     !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
1701 }
1702
1703 def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
1704 def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
1705 def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
1706 def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
1707
1708 def shr_imm8 : shr_imm<"8">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 8;}]>;
1709 def shr_imm16 : shr_imm<"16">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 16;}]>;
1710 def shr_imm32 : shr_imm<"32">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 32;}]>;
1711 def shr_imm64 : shr_imm<"64">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 64;}]>;
1712
1713 class shl_imm_asmoperands<string OFFSET> : AsmOperandClass {
1714   let Name = "ShlImm" # OFFSET;
1715   let RenderMethod = "addImmOperands";
1716   let DiagnosticType = "ShlImm" # OFFSET;
1717 }
1718
1719 class shl_imm<string OFFSET> : Operand<i32> {
1720   let EncoderMethod = "getShiftLeftImm" # OFFSET;
1721   let DecoderMethod = "DecodeShiftLeftImm" # OFFSET;
1722   let ParserMatchClass =
1723     !cast<AsmOperandClass>("shl_imm" # OFFSET # "_asmoperand");
1724 }
1725
1726 def shl_imm8_asmoperand : shl_imm_asmoperands<"8">;
1727 def shl_imm16_asmoperand : shl_imm_asmoperands<"16">;
1728 def shl_imm32_asmoperand : shl_imm_asmoperands<"32">;
1729 def shl_imm64_asmoperand : shl_imm_asmoperands<"64">;
1730
1731 def shl_imm8 : shl_imm<"8">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 8;}]>;
1732 def shl_imm16 : shl_imm<"16">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 16;}]>;
1733 def shl_imm32 : shl_imm<"32">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 32;}]>;
1734 def shl_imm64 : shl_imm<"64">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 64;}]>;
1735
1736 class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
1737                RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
1738   : NeonI_2VShiftImm<q, u, opcode,
1739                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1740                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1741                      [(set (Ty VPRC:$Rd),
1742                         (Ty (OpNode (Ty VPRC:$Rn),
1743                           (Ty (Neon_vdup (i32 ImmTy:$Imm))))))],
1744                      NoItinerary>,
1745     Sched<[WriteFPALU, ReadFPALU]>;
1746
1747 multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
1748   // 64-bit vector types.
1749   def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8, shl> {
1750     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1751   }
1752
1753   def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16, shl> {
1754     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1755   }
1756
1757   def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32, shl> {
1758     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1759   }
1760
1761   // 128-bit vector types.
1762   def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8, shl> {
1763     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1764   }
1765
1766   def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16, shl> {
1767     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1768   }
1769
1770   def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32, shl> {
1771     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1772   }
1773
1774   def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64, shl> {
1775     let Inst{22} = 0b1;        // immh:immb = 1xxxxxx
1776   }
1777 }
1778
1779 multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1780   def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1781                      OpNode> {
1782     let Inst{22-19} = 0b0001;
1783   }
1784
1785   def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1786                      OpNode> {
1787     let Inst{22-20} = 0b001;
1788   }
1789
1790   def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1791                      OpNode> {
1792     let Inst{22-21} = 0b01;
1793   }
1794
1795   def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1796                       OpNode> {
1797     let Inst{22-19} = 0b0001;
1798   }
1799
1800   def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1801                      OpNode> {
1802     let Inst{22-20} = 0b001;
1803   }
1804
1805   def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1806                      OpNode> {
1807     let Inst{22-21} = 0b01;
1808   }
1809
1810   def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1811                      OpNode> {
1812     let Inst{22} = 0b1;
1813   }
1814 }
1815
1816 // Shift left
1817
1818 defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
1819
1820 // Additional patterns to match vector shift left by immediate.
1821 // (v1i8/v1i16/v1i32 types)
1822 def : Pat<(v1i8 (shl (v1i8 FPR8:$Rn),
1823                      (v1i8 (Neon_vdup (i32 (shl_imm8:$Imm)))))),
1824           (EXTRACT_SUBREG
1825               (SHLvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
1826                           shl_imm8:$Imm),
1827               sub_8)>;
1828 def : Pat<(v1i16 (shl (v1i16 FPR16:$Rn),
1829                       (v1i16 (Neon_vdup (i32 (shl_imm16:$Imm)))))),
1830           (EXTRACT_SUBREG
1831               (SHLvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
1832                           shl_imm16:$Imm),
1833               sub_16)>;
1834 def : Pat<(v1i32 (shl (v1i32 FPR32:$Rn),
1835                       (v1i32 (Neon_vdup (i32 (shl_imm32:$Imm)))))),
1836           (EXTRACT_SUBREG
1837               (SHLvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
1838                           shl_imm32:$Imm),
1839               sub_32)>;
1840
1841 // Shift right
1842 defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
1843 defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
1844
1845 // Additional patterns to match vector shift right by immediate.
1846 // (v1i8/v1i16/v1i32 types)
1847 def : Pat<(v1i8 (sra (v1i8 FPR8:$Rn),
1848                      (v1i8 (Neon_vdup (i32 (shr_imm8:$Imm)))))),
1849           (EXTRACT_SUBREG
1850               (SSHRvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
1851                           shr_imm8:$Imm),
1852               sub_8)>;
1853 def : Pat<(v1i16 (sra (v1i16 FPR16:$Rn),
1854                       (v1i16 (Neon_vdup (i32 (shr_imm16:$Imm)))))),
1855           (EXTRACT_SUBREG
1856               (SSHRvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
1857                           shr_imm16:$Imm),
1858               sub_16)>;
1859 def : Pat<(v1i32 (sra (v1i32 FPR32:$Rn),
1860                       (v1i32 (Neon_vdup (i32 (shr_imm32:$Imm)))))),
1861           (EXTRACT_SUBREG
1862               (SSHRvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
1863                           shr_imm32:$Imm),
1864               sub_32)>;
1865 def : Pat<(v1i8 (srl (v1i8 FPR8:$Rn),
1866                      (v1i8 (Neon_vdup (i32 (shr_imm8:$Imm)))))),
1867           (EXTRACT_SUBREG
1868               (USHRvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
1869                           shr_imm8:$Imm),
1870               sub_8)>;
1871 def : Pat<(v1i16 (srl (v1i16 FPR16:$Rn),
1872                       (v1i16 (Neon_vdup (i32 (shr_imm16:$Imm)))))),
1873           (EXTRACT_SUBREG
1874               (USHRvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
1875                           shr_imm16:$Imm),
1876               sub_16)>;
1877 def : Pat<(v1i32 (srl (v1i32 FPR32:$Rn),
1878                       (v1i32 (Neon_vdup (i32 (shr_imm32:$Imm)))))),
1879           (EXTRACT_SUBREG
1880               (USHRvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
1881                           shr_imm32:$Imm),
1882               sub_32)>;
1883
1884 def Neon_High16B : PatFrag<(ops node:$in),
1885                            (extract_subvector (v16i8 node:$in), (iPTR 8))>;
1886 def Neon_High8H  : PatFrag<(ops node:$in),
1887                            (extract_subvector (v8i16 node:$in), (iPTR 4))>;
1888 def Neon_High4S  : PatFrag<(ops node:$in),
1889                            (extract_subvector (v4i32 node:$in), (iPTR 2))>;
1890 def Neon_High2D  : PatFrag<(ops node:$in),
1891                            (extract_subvector (v2i64 node:$in), (iPTR 1))>;
1892 def Neon_High4float : PatFrag<(ops node:$in),
1893                                (extract_subvector (v4f32 node:$in), (iPTR 2))>;
1894 def Neon_High2double : PatFrag<(ops node:$in),
1895                                (extract_subvector (v2f64 node:$in), (iPTR 1))>;
1896
1897 def Neon_Low16B : PatFrag<(ops node:$in),
1898                           (v8i8 (extract_subvector (v16i8 node:$in),
1899                                                    (iPTR 0)))>;
1900 def Neon_Low8H : PatFrag<(ops node:$in),
1901                          (v4i16 (extract_subvector (v8i16 node:$in),
1902                                                    (iPTR 0)))>;
1903 def Neon_Low4S : PatFrag<(ops node:$in),
1904                          (v2i32 (extract_subvector (v4i32 node:$in),
1905                                                    (iPTR 0)))>;
1906 def Neon_Low2D : PatFrag<(ops node:$in),
1907                          (v1i64 (extract_subvector (v2i64 node:$in),
1908                                                    (iPTR 0)))>;
1909 def Neon_Low4float : PatFrag<(ops node:$in),
1910                              (v2f32 (extract_subvector (v4f32 node:$in),
1911                                                        (iPTR 0)))>;
1912 def Neon_Low2double : PatFrag<(ops node:$in),
1913                               (v1f64 (extract_subvector (v2f64 node:$in),
1914                                                         (iPTR 0)))>;
1915
1916 class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1917                    string SrcT, ValueType DestTy, ValueType SrcTy,
1918                    Operand ImmTy, SDPatternOperator ExtOp>
1919   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1920                      (ins VPR64:$Rn, ImmTy:$Imm),
1921                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1922                      [(set (DestTy VPR128:$Rd),
1923                         (DestTy (shl
1924                           (DestTy (ExtOp (SrcTy VPR64:$Rn))),
1925                             (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
1926                      NoItinerary>,
1927     Sched<[WriteFPALU, ReadFPALU]>;
1928
1929 class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1930                        string SrcT, ValueType DestTy, ValueType SrcTy,
1931                        int StartIndex, Operand ImmTy,
1932                        SDPatternOperator ExtOp, PatFrag getTop>
1933   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1934                      (ins VPR128:$Rn, ImmTy:$Imm),
1935                      asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1936                      [(set (DestTy VPR128:$Rd),
1937                         (DestTy (shl
1938                           (DestTy (ExtOp
1939                             (SrcTy (getTop VPR128:$Rn)))),
1940                               (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
1941                      NoItinerary>,
1942     Sched<[WriteFPALU, ReadFPALU]>;
1943
1944 multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
1945                          SDNode ExtOp> {
1946   // 64-bit vector types.
1947   def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
1948                          shl_imm8, ExtOp> {
1949     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1950   }
1951
1952   def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
1953                          shl_imm16, ExtOp> {
1954     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1955   }
1956
1957   def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
1958                          shl_imm32, ExtOp> {
1959     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1960   }
1961
1962   // 128-bit vector types
1963   def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b", v8i16, v8i8,
1964                               8, shl_imm8, ExtOp, Neon_High16B> {
1965     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1966   }
1967
1968   def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h", v4i32, v4i16,
1969                              4, shl_imm16, ExtOp, Neon_High8H> {
1970     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1971   }
1972
1973   def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s", v2i64, v2i32,
1974                              2, shl_imm32, ExtOp, Neon_High4S> {
1975     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1976   }
1977
1978   // Use other patterns to match when the immediate is 0.
1979   def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
1980             (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
1981
1982   def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
1983             (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
1984
1985   def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
1986             (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
1987
1988   def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
1989             (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
1990
1991   def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
1992             (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
1993
1994   def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
1995             (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
1996 }
1997
1998 // Shift left long
1999 defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
2000 defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
2001
2002 class NeonI_ext_len_alias<string asmop, string lane, string laneOp,
2003                        Instruction inst, RegisterOperand VPRC,
2004                        RegisterOperand VPRCOp>
2005   : NeonInstAlias<asmop # "\t$Rd" # lane #", $Rn" # laneOp,
2006                   (inst VPRC:$Rd, VPRCOp:$Rn, 0), 0b0>;
2007
2008 // Signed integer lengthen (vector) is an alias for SSHLL Vd, Vn, #0
2009 // Signed integer lengthen (vector, second part) is an alias for SSHLL2 Vd, Vn, #0
2010 // FIXME: This is actually the preferred syntax but TableGen can't deal with
2011 // custom printing of aliases.
2012 def SXTLvv_8B  : NeonI_ext_len_alias<"sxtl", ".8h", ".8b",  SSHLLvvi_8B, VPR128, VPR64>;
2013 def SXTLvv_4H  : NeonI_ext_len_alias<"sxtl", ".4s", ".4h",  SSHLLvvi_4H, VPR128, VPR64>;
2014 def SXTLvv_2S  : NeonI_ext_len_alias<"sxtl", ".2d", ".2s",  SSHLLvvi_2S, VPR128, VPR64>;
2015 def SXTL2vv_16B : NeonI_ext_len_alias<"sxtl2", ".8h", ".16b",  SSHLLvvi_16B, VPR128, VPR128>;
2016 def SXTL2vv_8H  : NeonI_ext_len_alias<"sxtl2", ".4s", ".8h",  SSHLLvvi_8H, VPR128, VPR128>;
2017 def SXTL2vv_4S  : NeonI_ext_len_alias<"sxtl2", ".2d", ".4s",  SSHLLvvi_4S, VPR128, VPR128>;
2018
2019 // Unsigned integer lengthen (vector) is an alias for USHLL Vd, Vn, #0
2020 // Unsigned integer lengthen (vector, second part) is an alias for USHLL2 Vd, Vn, #0
2021 // FIXME: This is actually the preferred syntax but TableGen can't deal with
2022 // custom printing of aliases.
2023 def UXTLvv_8B  : NeonI_ext_len_alias<"uxtl", ".8h", ".8b",  USHLLvvi_8B, VPR128, VPR64>;
2024 def UXTLvv_4H  : NeonI_ext_len_alias<"uxtl", ".4s", ".4h",  USHLLvvi_4H, VPR128, VPR64>;
2025 def UXTLvv_2S  : NeonI_ext_len_alias<"uxtl", ".2d", ".2s",  USHLLvvi_2S, VPR128, VPR64>;
2026 def UXTL2vv_16B : NeonI_ext_len_alias<"uxtl2", ".8h", ".16b",  USHLLvvi_16B, VPR128, VPR128>;
2027 def UXTL2vv_8H  : NeonI_ext_len_alias<"uxtl2", ".4s", ".8h",  USHLLvvi_8H, VPR128, VPR128>;
2028 def UXTL2vv_4S  : NeonI_ext_len_alias<"uxtl2", ".2d", ".4s",  USHLLvvi_4S, VPR128, VPR128>;
2029
2030 def : Pat<(v8i16 (anyext (v8i8 VPR64:$Rn))), (USHLLvvi_8B VPR64:$Rn, 0)>;
2031 def : Pat<(v4i32 (anyext (v4i16 VPR64:$Rn))), (USHLLvvi_4H VPR64:$Rn, 0)>;
2032 def : Pat<(v2i64 (anyext (v2i32 VPR64:$Rn))), (USHLLvvi_2S VPR64:$Rn, 0)>;
2033
2034 // Rounding/Saturating shift
2035 class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
2036                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
2037                   SDPatternOperator OpNode>
2038   : NeonI_2VShiftImm<q, u, opcode,
2039                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
2040                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2041                      [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
2042                         (i32 ImmTy:$Imm))))],
2043                      NoItinerary>,
2044     Sched<[WriteFPALU, ReadFPALU]>;
2045
2046 // shift right (vector by immediate)
2047 multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
2048                            SDPatternOperator OpNode> {
2049   def _8B  : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
2050                          OpNode> {
2051     let Inst{22-19} = 0b0001;
2052   }
2053
2054   def _4H  : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2055                          OpNode> {
2056     let Inst{22-20} = 0b001;
2057   }
2058
2059   def _2S  : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2060                          OpNode> {
2061     let Inst{22-21} = 0b01;
2062   }
2063
2064   def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2065                          OpNode> {
2066     let Inst{22-19} = 0b0001;
2067   }
2068
2069   def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2070                         OpNode> {
2071     let Inst{22-20} = 0b001;
2072   }
2073
2074   def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2075                         OpNode> {
2076     let Inst{22-21} = 0b01;
2077   }
2078
2079   def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2080                         OpNode> {
2081     let Inst{22} = 0b1;
2082   }
2083 }
2084
2085 multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
2086                           SDPatternOperator OpNode> {
2087   // 64-bit vector types.
2088   def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
2089                         OpNode> {
2090     let Inst{22-19} = 0b0001;
2091   }
2092
2093   def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
2094                         OpNode> {
2095     let Inst{22-20} = 0b001;
2096   }
2097
2098   def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
2099                         OpNode> {
2100     let Inst{22-21} = 0b01;
2101   }
2102
2103   // 128-bit vector types.
2104   def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
2105                          OpNode> {
2106     let Inst{22-19} = 0b0001;
2107   }
2108
2109   def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
2110                         OpNode> {
2111     let Inst{22-20} = 0b001;
2112   }
2113
2114   def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
2115                         OpNode> {
2116     let Inst{22-21} = 0b01;
2117   }
2118
2119   def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
2120                         OpNode> {
2121     let Inst{22} = 0b1;
2122   }
2123 }
2124
2125 // Rounding shift right
2126 defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
2127                                 int_aarch64_neon_vsrshr>;
2128 defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
2129                                 int_aarch64_neon_vurshr>;
2130
2131 // Saturating shift left unsigned
2132 defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
2133
2134 // Saturating shift left
2135 defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
2136 defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
2137
2138 class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
2139                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
2140                   SDNode OpNode>
2141   : NeonI_2VShiftImm<q, u, opcode,
2142            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
2143            asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2144            [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
2145               (Ty (OpNode (Ty VPRC:$Rn),
2146                 (Ty (Neon_vdup (i32 ImmTy:$Imm))))))))],
2147            NoItinerary>,
2148     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
2149   let Constraints = "$src = $Rd";
2150 }
2151
2152 // Shift Right accumulate
2153 multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
2154   def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
2155                         OpNode> {
2156     let Inst{22-19} = 0b0001;
2157   }
2158
2159   def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2160                         OpNode> {
2161     let Inst{22-20} = 0b001;
2162   }
2163
2164   def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2165                         OpNode> {
2166     let Inst{22-21} = 0b01;
2167   }
2168
2169   def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2170                          OpNode> {
2171     let Inst{22-19} = 0b0001;
2172   }
2173
2174   def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2175                         OpNode> {
2176     let Inst{22-20} = 0b001;
2177   }
2178
2179   def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2180                         OpNode> {
2181     let Inst{22-21} = 0b01;
2182   }
2183
2184   def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2185                         OpNode> {
2186     let Inst{22} = 0b1;
2187   }
2188 }
2189
2190 // Shift right and accumulate
2191 defm SSRAvvi    : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
2192 defm USRAvvi    : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
2193
2194 // Rounding shift accumulate
2195 class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
2196                     RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
2197                     SDPatternOperator OpNode>
2198   : NeonI_2VShiftImm<q, u, opcode,
2199                      (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
2200                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2201                      [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
2202                         (Ty (OpNode (Ty VPRC:$Rn), (i32 ImmTy:$Imm))))))],
2203                      NoItinerary>,
2204     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
2205   let Constraints = "$src = $Rd";
2206 }
2207
2208 multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
2209                              SDPatternOperator OpNode> {
2210   def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
2211                           OpNode> {
2212     let Inst{22-19} = 0b0001;
2213   }
2214
2215   def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2216                           OpNode> {
2217     let Inst{22-20} = 0b001;
2218   }
2219
2220   def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2221                           OpNode> {
2222     let Inst{22-21} = 0b01;
2223   }
2224
2225   def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2226                            OpNode> {
2227     let Inst{22-19} = 0b0001;
2228   }
2229
2230   def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2231                           OpNode> {
2232     let Inst{22-20} = 0b001;
2233   }
2234
2235   def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2236                           OpNode> {
2237     let Inst{22-21} = 0b01;
2238   }
2239
2240   def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2241                           OpNode> {
2242     let Inst{22} = 0b1;
2243   }
2244 }
2245
2246 // Rounding shift right and accumulate
2247 defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
2248 defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
2249
2250 // Shift insert by immediate
2251 class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
2252                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
2253                   SDPatternOperator OpNode>
2254     : NeonI_2VShiftImm<q, u, opcode,
2255            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
2256            asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2257            [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
2258              (i32 ImmTy:$Imm))))],
2259            NoItinerary>,
2260       Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
2261   let Constraints = "$src = $Rd";
2262 }
2263
2264 // shift left insert (vector by immediate)
2265 multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
2266   def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
2267                         int_aarch64_neon_vsli> {
2268     let Inst{22-19} = 0b0001;
2269   }
2270
2271   def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
2272                         int_aarch64_neon_vsli> {
2273     let Inst{22-20} = 0b001;
2274   }
2275
2276   def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
2277                         int_aarch64_neon_vsli> {
2278     let Inst{22-21} = 0b01;
2279   }
2280
2281     // 128-bit vector types
2282   def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
2283                          int_aarch64_neon_vsli> {
2284     let Inst{22-19} = 0b0001;
2285   }
2286
2287   def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
2288                         int_aarch64_neon_vsli> {
2289     let Inst{22-20} = 0b001;
2290   }
2291
2292   def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
2293                         int_aarch64_neon_vsli> {
2294     let Inst{22-21} = 0b01;
2295   }
2296
2297   def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
2298                         int_aarch64_neon_vsli> {
2299     let Inst{22} = 0b1;
2300   }
2301 }
2302
2303 // shift right insert (vector by immediate)
2304 multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
2305     // 64-bit vector types.
2306   def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
2307                         int_aarch64_neon_vsri> {
2308     let Inst{22-19} = 0b0001;
2309   }
2310
2311   def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2312                         int_aarch64_neon_vsri> {
2313     let Inst{22-20} = 0b001;
2314   }
2315
2316   def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2317                         int_aarch64_neon_vsri> {
2318     let Inst{22-21} = 0b01;
2319   }
2320
2321     // 128-bit vector types
2322   def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2323                          int_aarch64_neon_vsri> {
2324     let Inst{22-19} = 0b0001;
2325   }
2326
2327   def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2328                         int_aarch64_neon_vsri> {
2329     let Inst{22-20} = 0b001;
2330   }
2331
2332   def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2333                         int_aarch64_neon_vsri> {
2334     let Inst{22-21} = 0b01;
2335   }
2336
2337   def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2338                         int_aarch64_neon_vsri> {
2339     let Inst{22} = 0b1;
2340   }
2341 }
2342
2343 // Shift left and insert
2344 defm SLIvvi   : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
2345
2346 // Shift right and insert
2347 defm SRIvvi   : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
2348
2349 class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
2350                     string SrcT, Operand ImmTy>
2351   : NeonI_2VShiftImm<q, u, opcode,
2352                      (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
2353                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
2354                      [], NoItinerary>,
2355     Sched<[WriteFPALU, ReadFPALU]>;
2356
2357 class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
2358                        string SrcT, Operand ImmTy>
2359   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
2360                      (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
2361                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
2362                      [], NoItinerary>,
2363     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
2364   let Constraints = "$src = $Rd";
2365 }
2366
2367 // shift right narrow (vector by immediate)
2368 multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
2369   def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
2370     let Inst{22-19} = 0b0001;
2371   }
2372
2373   def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
2374     let Inst{22-20} = 0b001;
2375   }
2376
2377   def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
2378     let Inst{22-21} = 0b01;
2379   }
2380
2381   // Shift Narrow High
2382   def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
2383                               shr_imm8> {
2384     let Inst{22-19} = 0b0001;
2385   }
2386
2387   def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
2388                              shr_imm16> {
2389     let Inst{22-20} = 0b001;
2390   }
2391
2392   def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
2393                              shr_imm32> {
2394     let Inst{22-21} = 0b01;
2395   }
2396 }
2397
2398 // Shift right narrow
2399 defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
2400
2401 // Shift right narrow (prefix Q is saturating, prefix R is rounding)
2402 defm QSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
2403 defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
2404 defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
2405 defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
2406 defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
2407 defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
2408 defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
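// For reference (illustrative operands, not from the definitions above), the
// narrowing right shifts read e.g.:
//   shrn     v0.8b,  v1.8h, #3   // plain narrow, writes the low half of Vd
//   shrn2    v0.16b, v1.8h, #3   // "2" form writes the high half of Vd
//   sqrshrun v0.8b,  v1.8h, #3   // saturating (unsigned result), rounding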
2409
2410 def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
2411                               (v2i64 (concat_vectors (v1i64 node:$Rm),
2412                                                      (v1i64 node:$Rn)))>;
2413 def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
2414                               (v8i16 (concat_vectors (v4i16 node:$Rm),
2415                                                      (v4i16 node:$Rn)))>;
2416 def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
2417                               (v4i32 (concat_vectors (v2i32 node:$Rm),
2418                                                      (v2i32 node:$Rn)))>;
2419 def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
2420                               (v4f32 (concat_vectors (v2f32 node:$Rm),
2421                                                      (v2f32 node:$Rn)))>;
2422 def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
2423                               (v2f64 (concat_vectors (v1f64 node:$Rm),
2424                                                      (v1f64 node:$Rn)))>;
2425
2426 def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2427                              (v8i16 (srl (v8i16 node:$lhs),
2428                                (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
2429 def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2430                              (v4i32 (srl (v4i32 node:$lhs),
2431                                (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
2432 def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2433                              (v2i64 (srl (v2i64 node:$lhs),
2434                                (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
2435 def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2436                              (v8i16 (sra (v8i16 node:$lhs),
2437                                (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
2438 def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2439                              (v4i32 (sra (v4i32 node:$lhs),
2440                                (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
2441 def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2442                              (v2i64 (sra (v2i64 node:$lhs),
2443                                (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
2444
2445 // Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
2446 multiclass Neon_shiftNarrow_patterns<string shr> {
2447   def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
2448               (i32 shr_imm8:$Imm)))),
2449             (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
2450   def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
2451               (i32 shr_imm16:$Imm)))),
2452             (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
2453   def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
2454               (i32 shr_imm32:$Imm)))),
2455             (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
2456
2457   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2458               (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
2459                 VPR128:$Rn, (i32 shr_imm8:$Imm))))))),
2460             (SHRNvvi_16B (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
2461                          VPR128:$Rn, imm:$Imm)>;
2462   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2463               (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
2464                 VPR128:$Rn, (i32 shr_imm16:$Imm))))))),
2465             (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2466                         VPR128:$Rn, imm:$Imm)>;
2467   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2468               (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
2469                 VPR128:$Rn, (i32 shr_imm32:$Imm))))))),
2470             (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2471                         VPR128:$Rn, imm:$Imm)>;
2472 }
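// Informal sketch of what the first pattern in the multiclass above matches
// (SelectionDAG notation, shift amount chosen for illustration):
//   (v8i8 (trunc (srl (v8i16 Vn), (Neon_vdup 3))))  -->  SHRNvvi_8B Vn, 3
// The last three patterns additionally match the concat_vectors that keeps the
// existing low half, so they select the high-half forms (SHRNvvi_16B/_8H/_4S).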
2473
2474 multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
2475   def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), shr_imm8:$Imm)),
2476             (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
2477   def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), shr_imm16:$Imm)),
2478             (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
2479   def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), shr_imm32:$Imm)),
2480             (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
2481
2482   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2483                 (v1i64 (bitconvert (v8i8
2484                     (op (v8i16 VPR128:$Rn), shr_imm8:$Imm))))),
2485             (!cast<Instruction>(prefix # "_16B")
2486                 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2487                 VPR128:$Rn, imm:$Imm)>;
2488   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2489                 (v1i64 (bitconvert (v4i16
2490                     (op (v4i32 VPR128:$Rn), shr_imm16:$Imm))))),
2491             (!cast<Instruction>(prefix # "_8H")
2492                 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2493                 VPR128:$Rn, imm:$Imm)>;
2494   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2495                 (v1i64 (bitconvert (v2i32
2496                     (op (v2i64 VPR128:$Rn), shr_imm32:$Imm))))),
2497             (!cast<Instruction>(prefix # "_4S")
2498                   (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2499                   VPR128:$Rn, imm:$Imm)>;
2500 }
2501
2502 defm : Neon_shiftNarrow_patterns<"lshr">;
2503 defm : Neon_shiftNarrow_patterns<"ashr">;
2504
2505 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
2506 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
2507 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
2508 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
2509 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
2510 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
2511 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
2512
2513 // Convert between fixed-point and floating-point
2514 class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
2515                 RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
2516                 Operand ImmTy, SDPatternOperator IntOp>
2517   : NeonI_2VShiftImm<q, u, opcode,
2518                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
2519                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2520                      [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
2521                        (i32 ImmTy:$Imm))))],
2522                      NoItinerary>,
2523     Sched<[WriteFPALU, ReadFPALU]>;
2524
2525 multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
2526                               SDPatternOperator IntOp> {
2527   def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
2528                       shr_imm32, IntOp> {
2529     let Inst{22-21} = 0b01;
2530   }
2531
2532   def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
2533                       shr_imm32, IntOp> {
2534     let Inst{22-21} = 0b01;
2535   }
2536
2537   def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
2538                       shr_imm64, IntOp> {
2539     let Inst{22} = 0b1;
2540   }
2541 }
2542
2543 multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
2544                               SDPatternOperator IntOp> {
2545   def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
2546                       shr_imm32, IntOp> {
2547     let Inst{22-21} = 0b01;
2548   }
2549
2550   def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
2551                       shr_imm32, IntOp> {
2552     let Inst{22-21} = 0b01;
2553   }
2554
2555   def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
2556                       shr_imm64, IntOp> {
2557     let Inst{22} = 0b1;
2558   }
2559 }
2560
2561 // Convert fixed-point to floating-point
2562 defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
2563                                    int_arm_neon_vcvtfxs2fp>;
2564 defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
2565                                    int_arm_neon_vcvtfxu2fp>;
2566
2567 // Convert floating-point to fixed-point
2568 defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
2569                                    int_arm_neon_vcvtfp2fxs>;
2570 defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
2571                                    int_arm_neon_vcvtfp2fxu>;
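// Illustrative syntax for the fixed-point converts defined above (registers
// and #fbits chosen arbitrarily):
//   scvtf  v0.2s, v1.2s, #4   // signed fixed-point (4 fractional bits) -> float
//   fcvtzs v0.4s, v1.4s, #8   // float -> signed fixed-point, 8 fractional bits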
2572
2573 multiclass Neon_sshll2_0<SDNode ext>
2574 {
2575   def _v8i8  : PatFrag<(ops node:$Rn),
2576                        (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
2577   def _v4i16 : PatFrag<(ops node:$Rn),
2578                        (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
2579   def _v2i32 : PatFrag<(ops node:$Rn),
2580                        (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
2581 }
2582
2583 defm NI_sext_high : Neon_sshll2_0<sext>;
2584 defm NI_zext_high : Neon_sshll2_0<zext>;
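// NI_sext_high_* / NI_zext_high_* take the high half of a 128-bit source (via
// Neon_High16B/8H/4S) and sign/zero extend it; the "*2" long/wide instruction
// classes below use them to operate on the upper source elements.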
2585
2586
2587 //===----------------------------------------------------------------------===//
2588 // Multiclasses for NeonI_Across
2589 //===----------------------------------------------------------------------===//
2590
2591 // Variant 1
2592
2593 multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
2594                             string asmop, SDPatternOperator opnode>
2595 {
2596     def _1h8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
2597                 (outs FPR16:$Rd), (ins VPR64:$Rn),
2598                 asmop # "\t$Rd, $Rn.8b",
2599                 [(set (v1i16 FPR16:$Rd),
2600                     (v1i16 (opnode (v8i8 VPR64:$Rn))))],
2601                 NoItinerary>,
2602                 Sched<[WriteFPALU, ReadFPALU]>;
2603
2604     def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2605                 (outs FPR16:$Rd), (ins VPR128:$Rn),
2606                 asmop # "\t$Rd, $Rn.16b",
2607                 [(set (v1i16 FPR16:$Rd),
2608                     (v1i16 (opnode (v16i8 VPR128:$Rn))))],
2609                 NoItinerary>,
2610                 Sched<[WriteFPALU, ReadFPALU]>;
2611
2612     def _1s4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
2613                 (outs FPR32:$Rd), (ins VPR64:$Rn),
2614                 asmop # "\t$Rd, $Rn.4h",
2615                 [(set (v1i32 FPR32:$Rd),
2616                     (v1i32 (opnode (v4i16 VPR64:$Rn))))],
2617                 NoItinerary>,
2618                 Sched<[WriteFPALU, ReadFPALU]>;
2619
2620     def _1s8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
2621                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2622                 asmop # "\t$Rd, $Rn.8h",
2623                 [(set (v1i32 FPR32:$Rd),
2624                     (v1i32 (opnode (v8i16 VPR128:$Rn))))],
2625                 NoItinerary>,
2626                 Sched<[WriteFPALU, ReadFPALU]>;
2627
2628     // _1d2s doesn't exist!
2629
2630     def _1d4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
2631                 (outs FPR64:$Rd), (ins VPR128:$Rn),
2632                 asmop # "\t$Rd, $Rn.4s",
2633                 [(set (v1i64 FPR64:$Rd),
2634                     (v1i64 (opnode (v4i32 VPR128:$Rn))))],
2635                 NoItinerary>,
2636                 Sched<[WriteFPALU, ReadFPALU]>;
2637 }
2638
2639 defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
2640 defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
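// Illustrative syntax (arbitrary registers): the result is a scalar FP/SIMD
// register one step wider than the source elements, e.g.
//   saddlv h0, v1.8b    // sum all bytes into a 16-bit result
//   uaddlv s0, v1.8h    // sum all halfwords into a 32-bit result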
2641
2642 // Variant 2
2643
2644 multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
2645                             string asmop, SDPatternOperator opnode>
2646 {
2647     def _1b8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
2648                 (outs FPR8:$Rd), (ins VPR64:$Rn),
2649                 asmop # "\t$Rd, $Rn.8b",
2650                 [(set (v1i8 FPR8:$Rd),
2651                     (v1i8 (opnode (v8i8 VPR64:$Rn))))],
2652                 NoItinerary>,
2653                 Sched<[WriteFPALU, ReadFPALU]>;
2654
2655     def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2656                 (outs FPR8:$Rd), (ins VPR128:$Rn),
2657                 asmop # "\t$Rd, $Rn.16b",
2658                 [(set (v1i8 FPR8:$Rd),
2659                     (v1i8 (opnode (v16i8 VPR128:$Rn))))],
2660                 NoItinerary>,
2661                 Sched<[WriteFPALU, ReadFPALU]>;
2662
2663     def _1h4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
2664                 (outs FPR16:$Rd), (ins VPR64:$Rn),
2665                 asmop # "\t$Rd, $Rn.4h",
2666                 [(set (v1i16 FPR16:$Rd),
2667                     (v1i16 (opnode (v4i16 VPR64:$Rn))))],
2668                 NoItinerary>,
2669                 Sched<[WriteFPALU, ReadFPALU]>;
2670
2671     def _1h8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
2672                 (outs FPR16:$Rd), (ins VPR128:$Rn),
2673                 asmop # "\t$Rd, $Rn.8h",
2674                 [(set (v1i16 FPR16:$Rd),
2675                     (v1i16 (opnode (v8i16 VPR128:$Rn))))],
2676                 NoItinerary>,
2677                 Sched<[WriteFPALU, ReadFPALU]>;
2678
2679     // _1s2s doesn't exist!
2680
2681     def _1s4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
2682                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2683                 asmop # "\t$Rd, $Rn.4s",
2684                 [(set (v1i32 FPR32:$Rd),
2685                     (v1i32 (opnode (v4i32 VPR128:$Rn))))],
2686                 NoItinerary>,
2687                 Sched<[WriteFPALU, ReadFPALU]>;
2688 }
2689
2690 defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
2691 defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
2692
2693 defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
2694 defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
2695
2696 defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
2697
2698 // Variant 3
2699
2700 multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
2701                             string asmop, SDPatternOperator opnode> {
2702     def _1s4s:  NeonI_2VAcross<0b1, u, size, opcode,
2703                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2704                 asmop # "\t$Rd, $Rn.4s",
2705                 [(set (f32 FPR32:$Rd),
2706                     (f32 (opnode (v4f32 VPR128:$Rn))))],
2707                 NoItinerary>,
2708                 Sched<[WriteFPALU, ReadFPALU]>;
2709 }
2710
2711 defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
2712                                 int_aarch64_neon_vmaxnmv>;
2713 defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
2714                                 int_aarch64_neon_vminnmv>;
2715
2716 defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
2717                               int_aarch64_neon_vmaxv>;
2718 defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
2719                               int_aarch64_neon_vminv>;
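// Illustrative syntax for the across-lanes reductions above (arbitrary
// registers): the scalar result keeps the source element width, e.g.
//   smaxv   b0, v1.16b
//   addv    s0, v1.4s
//   fmaxnmv s0, v1.4s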
2720
2721 // The following definitions are for the instruction class (Perm)
2722
2723 class NeonI_Permute<bit q, bits<2> size, bits<3> opcode,
2724                     string asmop, RegisterOperand OpVPR, string OpS,
2725                     SDPatternOperator opnode, ValueType Ty>
2726   : NeonI_Perm<q, size, opcode,
2727                (outs OpVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2728                asmop # "\t$Rd." # OpS # ", $Rn." # OpS # ", $Rm." # OpS,
2729                [(set (Ty OpVPR:$Rd),
2730                   (Ty (opnode (Ty OpVPR:$Rn), (Ty OpVPR:$Rm))))],
2731                NoItinerary>,
2732     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2733
2734 multiclass NeonI_Perm_pat<bits<3> opcode, string asmop,
2735                           SDPatternOperator opnode> {
2736   def _8b  : NeonI_Permute<0b0, 0b00, opcode, asmop,
2737                            VPR64, "8b", opnode, v8i8>;
2738   def _16b : NeonI_Permute<0b1, 0b00, opcode, asmop,
2739                            VPR128, "16b",opnode, v16i8>;
2740   def _4h  : NeonI_Permute<0b0, 0b01, opcode, asmop,
2741                            VPR64, "4h", opnode, v4i16>;
2742   def _8h  : NeonI_Permute<0b1, 0b01, opcode, asmop,
2743                            VPR128, "8h", opnode, v8i16>;
2744   def _2s  : NeonI_Permute<0b0, 0b10, opcode, asmop,
2745                            VPR64, "2s", opnode, v2i32>;
2746   def _4s  : NeonI_Permute<0b1, 0b10, opcode, asmop,
2747                            VPR128, "4s", opnode, v4i32>;
2748   def _2d  : NeonI_Permute<0b1, 0b11, opcode, asmop,
2749                            VPR128, "2d", opnode, v2i64>;
2750 }
2751
2752 defm UZP1vvv : NeonI_Perm_pat<0b001, "uzp1", Neon_uzp1>;
2753 defm TRN1vvv : NeonI_Perm_pat<0b010, "trn1", Neon_trn1>;
2754 defm ZIP1vvv : NeonI_Perm_pat<0b011, "zip1", Neon_zip1>;
2755 defm UZP2vvv : NeonI_Perm_pat<0b101, "uzp2", Neon_uzp2>;
2756 defm TRN2vvv : NeonI_Perm_pat<0b110, "trn2", Neon_trn2>;
2757 defm ZIP2vvv : NeonI_Perm_pat<0b111, "zip2", Neon_zip2>;
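// Informal reminder of the lane semantics (4-element example, indices
// illustrative): for A = {a0,a1,a2,a3}, B = {b0,b1,b2,b3},
//   zip1 -> {a0,b0,a1,b1}    zip2 -> {a2,b2,a3,b3}
//   uzp1 -> {a0,a2,b0,b2}    uzp2 -> {a1,a3,b1,b3}
//   trn1 -> {a0,b0,a2,b2}    trn2 -> {a1,b1,a3,b3}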
2758
2759 multiclass NeonI_Perm_float_pat<string INS, SDPatternOperator opnode> {
2760   def : Pat<(v2f32 (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
2761             (!cast<Instruction>(INS # "_2s") VPR64:$Rn, VPR64:$Rm)>;
2762
2763   def : Pat<(v4f32 (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
2764             (!cast<Instruction>(INS # "_4s") VPR128:$Rn, VPR128:$Rm)>;
2765
2766   def : Pat<(v2f64 (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
2767             (!cast<Instruction>(INS # "_2d") VPR128:$Rn, VPR128:$Rm)>;
2768 }
2769
2770 defm : NeonI_Perm_float_pat<"UZP1vvv", Neon_uzp1>;
2771 defm : NeonI_Perm_float_pat<"UZP2vvv", Neon_uzp2>;
2772 defm : NeonI_Perm_float_pat<"ZIP1vvv", Neon_zip1>;
2773 defm : NeonI_Perm_float_pat<"ZIP2vvv", Neon_zip2>;
2774 defm : NeonI_Perm_float_pat<"TRN1vvv", Neon_trn1>;
2775 defm : NeonI_Perm_float_pat<"TRN2vvv", Neon_trn2>;
2776
2777 // The following definitions are for the instruction class (3V Diff)
2778
2779 // normal long/long2 pattern
2780 class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
2781                  string asmop, string ResS, string OpS,
2782                  SDPatternOperator opnode, SDPatternOperator ext,
2783                  RegisterOperand OpVPR,
2784                  ValueType ResTy, ValueType OpTy>
2785   : NeonI_3VDiff<q, u, size, opcode,
2786                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2787                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2788                  [(set (ResTy VPR128:$Rd),
2789                     (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
2790                                    (ResTy (ext (OpTy OpVPR:$Rm))))))],
2791                  NoItinerary>,
2792     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2793
2794 multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
2795                         string asmop, SDPatternOperator opnode,
2796                         bit Commutable = 0> {
2797   let isCommutable = Commutable in {
2798     def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2799                            opnode, sext, VPR64, v8i16, v8i8>;
2800     def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2801                            opnode, sext, VPR64, v4i32, v4i16>;
2802     def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2803                            opnode, sext, VPR64, v2i64, v2i32>;
2804   }
2805 }
2806
2807 multiclass NeonI_3VDL2_s<bit u, bits<4> opcode, string asmop,
2808                          SDPatternOperator opnode, bit Commutable = 0> {
2809   let isCommutable = Commutable in {
2810     def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2811                             opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2812     def _4s8h  : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2813                             opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2814     def _2d4s  : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2815                             opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2816   }
2817 }
2818
2819 multiclass NeonI_3VDL_u<bit u, bits<4> opcode, string asmop,
2820                         SDPatternOperator opnode, bit Commutable = 0> {
2821   let isCommutable = Commutable in {
2822     def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2823                            opnode, zext, VPR64, v8i16, v8i8>;
2824     def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2825                            opnode, zext, VPR64, v4i32, v4i16>;
2826     def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2827                            opnode, zext, VPR64, v2i64, v2i32>;
2828   }
2829 }
2830
2831 multiclass NeonI_3VDL2_u<bit u, bits<4> opcode, string asmop,
2832                          SDPatternOperator opnode, bit Commutable = 0> {
2833   let isCommutable = Commutable in {
2834     def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2835                             opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2836     def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2837                            opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2838     def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2839                            opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2840   }
2841 }
2842
2843 defm SADDLvvv :  NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
2844 defm UADDLvvv :  NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
2845
2846 defm SADDL2vvv :  NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
2847 defm UADDL2vvv :  NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
2848
2849 defm SSUBLvvv :  NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
2850 defm USUBLvvv :  NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
2851
2852 defm SSUBL2vvv :  NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
2853 defm USUBL2vvv :  NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
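// Illustrative syntax (arbitrary registers): the plain forms widen the low
// 64-bit halves, the "2" forms widen the high halves, e.g.
//   saddl  v0.8h, v1.8b,  v2.8b
//   saddl2 v0.8h, v1.16b, v2.16b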
2854
2855 // normal wide/wide2 pattern
2856 class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
2857                  string asmop, string ResS, string OpS,
2858                  SDPatternOperator opnode, SDPatternOperator ext,
2859                  RegisterOperand OpVPR,
2860                  ValueType ResTy, ValueType OpTy>
2861   : NeonI_3VDiff<q, u, size, opcode,
2862                  (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
2863                  asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
2864                  [(set (ResTy VPR128:$Rd),
2865                     (ResTy (opnode (ResTy VPR128:$Rn),
2866                                    (ResTy (ext (OpTy OpVPR:$Rm))))))],
2867                  NoItinerary>,
2868     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2869
2870 multiclass NeonI_3VDW_s<bit u, bits<4> opcode, string asmop,
2871                         SDPatternOperator opnode> {
2872   def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2873                          opnode, sext, VPR64, v8i16, v8i8>;
2874   def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2875                          opnode, sext, VPR64, v4i32, v4i16>;
2876   def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2877                          opnode, sext, VPR64, v2i64, v2i32>;
2878 }
2879
2880 defm SADDWvvv :  NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
2881 defm SSUBWvvv :  NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
2882
2883 multiclass NeonI_3VDW2_s<bit u, bits<4> opcode, string asmop,
2884                          SDPatternOperator opnode> {
2885   def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2886                           opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2887   def _4s8h  : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2888                           opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2889   def _2d4s  : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2890                           opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2891 }
2892
2893 defm SADDW2vvv :  NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
2894 defm SSUBW2vvv :  NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
2895
2896 multiclass NeonI_3VDW_u<bit u, bits<4> opcode, string asmop,
2897                         SDPatternOperator opnode> {
2898   def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2899                          opnode, zext, VPR64, v8i16, v8i8>;
2900   def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2901                          opnode, zext, VPR64, v4i32, v4i16>;
2902   def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2903                          opnode, zext, VPR64, v2i64, v2i32>;
2904 }
2905
2906 defm UADDWvvv :  NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
2907 defm USUBWvvv :  NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
2908
2909 multiclass NeonI_3VDW2_u<bit u, bits<4> opcode, string asmop,
2910                          SDPatternOperator opnode> {
2911   def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2912                           opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2913   def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2914                          opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2915   def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2916                          opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2917 }
2918
2919 defm UADDW2vvv :  NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
2920 defm USUBW2vvv :  NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
2921
2922 // Get the high half of each vector element.
2923 multiclass NeonI_get_high {
2924   def _8h : PatFrag<(ops node:$Rn),
2925                     (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
2926                                              (v8i16 (Neon_vdup (i32 8)))))))>;
2927   def _4s : PatFrag<(ops node:$Rn),
2928                     (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
2929                                               (v4i32 (Neon_vdup (i32 16)))))))>;
2930   def _2d : PatFrag<(ops node:$Rn),
2931                     (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
2932                                               (v2i64 (Neon_vdup (i32 32)))))))>;
2933 }
2934
2935 defm NI_get_hi : NeonI_get_high;
2936
2937 // pattern for addhn/subhn with 2 operands
2938 class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2939                            string asmop, string ResS, string OpS,
2940                            SDPatternOperator opnode, SDPatternOperator get_hi,
2941                            ValueType ResTy, ValueType OpTy>
2942   : NeonI_3VDiff<q, u, size, opcode,
2943                  (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2944                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2945                  [(set (ResTy VPR64:$Rd),
2946                     (ResTy (get_hi
2947                       (OpTy (opnode (OpTy VPR128:$Rn),
2948                                     (OpTy VPR128:$Rm))))))],
2949                  NoItinerary>,
2950     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2951
2952 multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode, string asmop,
2953                                 SDPatternOperator opnode, bit Commutable = 0> {
2954   let isCommutable = Commutable in {
2955     def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2956                                      opnode, NI_get_hi_8h, v8i8, v8i16>;
2957     def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2958                                      opnode, NI_get_hi_4s, v4i16, v4i32>;
2959     def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2960                                      opnode, NI_get_hi_2d, v2i32, v2i64>;
2961   }
2962 }
2963
2964 defm ADDHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
2965 defm SUBHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
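// Informal semantics (registers illustrative): addhn/subhn compute the
// full-width add/sub and keep only the high half of each element, e.g.
//   addhn v0.8b, v1.8h, v2.8h   // v0[i] = (v1[i] + v2[i]) >> 8
// which is exactly what the NI_get_hi_* fragments above describe.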
2966
2967 // pattern for operation with 2 operands
2968 class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2969                     string asmop, string ResS, string OpS,
2970                     SDPatternOperator opnode,
2971                     RegisterOperand ResVPR, RegisterOperand OpVPR,
2972                     ValueType ResTy, ValueType OpTy>
2973   : NeonI_3VDiff<q, u, size, opcode,
2974                  (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2975                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2976                  [(set (ResTy ResVPR:$Rd),
2977                     (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
2978                  NoItinerary>,
2979     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
2980
2981 // normal narrow pattern
2982 multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode, string asmop,
2983                           SDPatternOperator opnode, bit Commutable = 0> {
2984   let isCommutable = Commutable in {
2985     def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2986                               opnode, VPR64, VPR128, v8i8, v8i16>;
2987     def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2988                               opnode, VPR64, VPR128, v4i16, v4i32>;
2989     def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2990                               opnode, VPR64, VPR128, v2i32, v2i64>;
2991   }
2992 }
2993
2994 defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
2995 defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
2996
2997 // pattern for ACLE intrinsics with 3 operands
2998 class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2999                      string asmop, string ResS, string OpS>
3000   : NeonI_3VDiff<q, u, size, opcode,
3001                  (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
3002                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3003                  [], NoItinerary>,
3004     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
3005   let Constraints = "$src = $Rd";
3006   let neverHasSideEffects = 1;
3007 }
3008
3009 multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode, string asmop> {
3010   def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
3011   def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
3012   def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
3013 }
3014
3015 defm ADDHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
3016 defm SUBHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
3017
3018 defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
3019 defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
3020
3021 // Patterns have to be separate because there's a SUBREG_TO_REG in the output
3022 // part.
3023 class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
3024                         SDPatternOperator coreop>
3025   : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
3026                       (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
3027                                                         (SrcTy VPR128:$Rm)))))),
3028         (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
3029               VPR128:$Rn, VPR128:$Rm)>;
3030
3031 // addhn2 patterns
3032 def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8,  v8i16,
3033           BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
3034 def : NarrowHighHalfPat<ADDHN2vvv_8h4s,  v4i16, v4i32,
3035           BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
3036 def : NarrowHighHalfPat<ADDHN2vvv_4s2d,  v2i32, v2i64,
3037           BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
3038
3039 // subhn2 patterns
3040 def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8,  v8i16,
3041           BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
3042 def : NarrowHighHalfPat<SUBHN2vvv_8h4s,  v4i16, v4i32,
3043           BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
3044 def : NarrowHighHalfPat<SUBHN2vvv_4s2d,  v2i32, v2i64,
3045           BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
3046
3047 // raddhn2 patterns
3048 def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vraddhn>;
3049 def : NarrowHighHalfPat<RADDHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vraddhn>;
3050 def : NarrowHighHalfPat<RADDHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vraddhn>;
3051
3052 // rsubhn2 patterns
3053 def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vrsubhn>;
3054 def : NarrowHighHalfPat<RSUBHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vrsubhn>;
3055 def : NarrowHighHalfPat<RSUBHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vrsubhn>;
3056
3057 // patterns that need to extend the result
3058 class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
3059                      string asmop, string ResS, string OpS,
3060                      SDPatternOperator opnode,
3061                      RegisterOperand OpVPR,
3062                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
3063   : NeonI_3VDiff<q, u, size, opcode,
3064                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
3065                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3066                  [(set (ResTy VPR128:$Rd),
3067                     (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
3068                                                 (OpTy OpVPR:$Rm))))))],
3069                  NoItinerary>,
3070     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
3071
3072 multiclass NeonI_3VDL_zext<bit u, bits<4> opcode, string asmop,
3073                            SDPatternOperator opnode, bit Commutable = 0> {
3074   let isCommutable = Commutable in {
3075     def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3076                                opnode, VPR64, v8i16, v8i8, v8i8>;
3077     def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3078                                opnode, VPR64, v4i32, v4i16, v4i16>;
3079     def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3080                                opnode, VPR64, v2i64, v2i32, v2i32>;
3081   }
3082 }
3083
3084 defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
3085 defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
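// Illustrative syntax (arbitrary registers): widened absolute difference; the
// result is zero-extended in the pattern above since it is non-negative, e.g.
//   sabdl v0.8h, v1.8b, v2.8b   // v0[i] = |sext(v1[i]) - sext(v2[i])|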
3086
3087 multiclass NeonI_Op_High<SDPatternOperator op> {
3088   def _16B : PatFrag<(ops node:$Rn, node:$Rm),
3089                      (op (v8i8 (Neon_High16B node:$Rn)),
3090                          (v8i8 (Neon_High16B node:$Rm)))>;
3091   def _8H  : PatFrag<(ops node:$Rn, node:$Rm),
3092                      (op (v4i16 (Neon_High8H node:$Rn)),
3093                          (v4i16 (Neon_High8H node:$Rm)))>;
3094   def _4S  : PatFrag<(ops node:$Rn, node:$Rm),
3095                      (op (v2i32 (Neon_High4S node:$Rn)),
3096                          (v2i32 (Neon_High4S node:$Rm)))>;
3097 }
3098
3099 defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
3100 defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
3101 defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
3102 defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
3103 defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
3104 defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
3105
3106 multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode, string asmop, string opnode,
3107                             bit Commutable = 0> {
3108   let isCommutable = Commutable in {
3109     def _8h8b  : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3110                                 !cast<PatFrag>(opnode # "_16B"),
3111                                 VPR128, v8i16, v16i8, v8i8>;
3112     def _4s4h  : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3113                                 !cast<PatFrag>(opnode # "_8H"),
3114                                 VPR128, v4i32, v8i16, v4i16>;
3115     def _2d2s  : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3116                                 !cast<PatFrag>(opnode # "_4S"),
3117                                 VPR128, v2i64, v4i32, v2i32>;
3118   }
3119 }
3120
3121 defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
3122 defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
3123
3124 // For patterns that need two operators chained together.
3125 class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
3126                      string asmop, string ResS, string OpS,
3127                      SDPatternOperator opnode, SDPatternOperator subop,
3128                      RegisterOperand OpVPR,
3129                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
3130   : NeonI_3VDiff<q, u, size, opcode,
3131                  (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
3132                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3133                  [(set (ResTy VPR128:$Rd),
3134                     (ResTy (opnode
3135                       (ResTy VPR128:$src),
3136                       (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
3137                                                  (OpTy OpVPR:$Rm))))))))],
3138                  NoItinerary>,
3139     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
3140   let Constraints = "$src = $Rd";
3141 }
3142
3143 multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode, string asmop,
3144                              SDPatternOperator opnode, SDPatternOperator subop>{
3145   def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3146                              opnode, subop, VPR64, v8i16, v8i8, v8i8>;
3147   def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3148                              opnode, subop, VPR64, v4i32, v4i16, v4i16>;
3149   def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3150                              opnode, subop, VPR64, v2i64, v2i32, v2i32>;
3151 }
3152
3153 defm SABALvvv :  NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
3154                                    add, int_arm_neon_vabds>;
3155 defm UABALvvv :  NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
3156                                    add, int_arm_neon_vabdu>;
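// Informal semantics (arbitrary registers), matching the add-of-abs-diff
// patterns above:
//   sabal v0.8h, v1.8b, v2.8b   // v0[i] += |sext(v1[i]) - sext(v2[i])|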
3157
3158 multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode, string asmop,
3159                               SDPatternOperator opnode, string subop> {
3160   def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3161                              opnode, !cast<PatFrag>(subop # "_16B"),
3162                              VPR128, v8i16, v16i8, v8i8>;
3163   def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3164                              opnode, !cast<PatFrag>(subop # "_8H"),
3165                              VPR128, v4i32, v8i16, v4i16>;
3166   def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3167                              opnode, !cast<PatFrag>(subop # "_4S"),
3168                              VPR128, v2i64, v4i32, v2i32>;
3169 }
3170
3171 defm SABAL2vvv :  NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
3172                                      "NI_sabdl_hi">;
3173 defm UABAL2vvv :  NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
3174                                      "NI_uabdl_hi">;
3175
3176 // Long pattern with 2 operands
3177 multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode, string asmop,
3178                           SDPatternOperator opnode, bit Commutable = 0> {
3179   let isCommutable = Commutable,
3180       SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
3181     def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3182                               opnode, VPR128, VPR64, v8i16, v8i8>;
3183     def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3184                               opnode, VPR128, VPR64, v4i32, v4i16>;
3185     def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3186                               opnode, VPR128, VPR64, v2i64, v2i32>;
3187   }
3188 }
3189
3190 defm SMULLvvv :  NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
3191 defm UMULLvvv :  NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
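// Illustrative syntax (arbitrary registers): widening multiply of the low
// 64-bit halves; the "2" forms below use the high halves, e.g.
//   smull v0.8h, v1.8b, v2.8b
//   umull v0.4s, v1.4h, v2.4h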
3192
3193 class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
3194                            string asmop, string ResS, string OpS,
3195                            SDPatternOperator opnode,
3196                            ValueType ResTy, ValueType OpTy>
3197   : NeonI_3VDiff<q, u, size, opcode,
3198                  (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
3199                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3200                  [(set (ResTy VPR128:$Rd),
3201                     (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
3202                  NoItinerary>,
3203     Sched<[WriteFPMul, ReadFPMul, ReadFPMul]>;
3204
3205 multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode, string asmop,
3206                                    string opnode, bit Commutable = 0> {
3207   let isCommutable = Commutable in {
3208     def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3209                                       !cast<PatFrag>(opnode # "_16B"),
3210                                       v8i16, v16i8>;
3211     def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3212                                      !cast<PatFrag>(opnode # "_8H"),
3213                                      v4i32, v8i16>;
3214     def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3215                                      !cast<PatFrag>(opnode # "_4S"),
3216                                      v2i64, v4i32>;
3217   }
3218 }
3219
3220 defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
3221                                          "NI_smull_hi", 1>;
3222 defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
3223                                          "NI_umull_hi", 1>;
3224
3225 // Long pattern with 3 operands
3226 class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
3227                      string asmop, string ResS, string OpS,
3228                      SDPatternOperator opnode,
3229                      ValueType ResTy, ValueType OpTy>
3230   : NeonI_3VDiff<q, u, size, opcode,
3231                  (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
3232                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3233                  [(set (ResTy VPR128:$Rd),
3234                     (ResTy (opnode
3235                       (ResTy VPR128:$src),
3236                       (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
3237                NoItinerary>,
3238     Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
3239   let Constraints = "$src = $Rd";
3240 }
3241
3242 multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode, string asmop,
3243                              SDPatternOperator opnode> {
3244   def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3245                              opnode, v8i16, v8i8>;
3246   def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3247                              opnode, v4i32, v4i16>;
3248   def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3249                              opnode, v2i64, v2i32>;
3250 }
3251
3252 def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3253                          (add node:$Rd,
3254                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
3255
3256 def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3257                          (add node:$Rd,
3258                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
3259
3260 def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3261                          (sub node:$Rd,
3262                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
3263
3264 def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3265                          (sub node:$Rd,
3266                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
3267
3268 defm SMLALvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
3269 defm UMLALvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
3270
3271 defm SMLSLvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
3272 defm UMLSLvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
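// Informal semantics (arbitrary registers), matching the PatFrags above:
//   smlal v0.8h, v1.8b, v2.8b   // v0[i] += sext(v1[i]) * sext(v2[i])
//   umlsl v0.4s, v1.4h, v2.4h   // v0[i] -= zext(v1[i]) * zext(v2[i])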
3273
3274 class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
3275                            string asmop, string ResS, string OpS,
3276                            SDPatternOperator subop, SDPatternOperator opnode,
3277                            RegisterOperand OpVPR,
3278                            ValueType ResTy, ValueType OpTy>
3279   : NeonI_3VDiff<q, u, size, opcode,
3280                (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
3281                asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3282                [(set (ResTy VPR128:$Rd),
3283                   (ResTy (subop
3284                     (ResTy VPR128:$src),
3285                     (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
3286                NoItinerary>,
3287     Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
3288   let Constraints = "$src = $Rd";
3289 }
3290
3291 multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode, string asmop,
3292                                    SDPatternOperator subop, string opnode> {
3293   def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3294                                     subop, !cast<PatFrag>(opnode # "_16B"),
3295                                     VPR128, v8i16, v16i8>;
3296   def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3297                                    subop, !cast<PatFrag>(opnode # "_8H"),
3298                                    VPR128, v4i32, v8i16>;
3299   def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3300                                    subop, !cast<PatFrag>(opnode # "_4S"),
3301                                    VPR128, v2i64, v4i32>;
3302 }
3303
3304 defm SMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
3305                                           add, "NI_smull_hi">;
3306 defm UMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
3307                                           add, "NI_umull_hi">;
3308
3309 defm SMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
3310                                           sub, "NI_smull_hi">;
3311 defm UMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
3312                                           sub, "NI_umull_hi">;
3313
3314 multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode, string asmop,
3315                                     SDPatternOperator opnode> {
3316   def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3317                                    opnode, int_arm_neon_vqdmull,
3318                                    VPR64, v4i32, v4i16>;
3319   def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3320                                    opnode, int_arm_neon_vqdmull,
3321                                    VPR64, v2i64, v2i32>;
3322 }
3323
3324 defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
3325                                            int_arm_neon_vqadds>;
3326 defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
3327                                            int_arm_neon_vqsubs>;
3328
3329 multiclass NeonI_3VDL_v2<bit u, bits<4> opcode, string asmop,
3330                          SDPatternOperator opnode, bit Commutable = 0> {
3331   let isCommutable = Commutable in {
3332     def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3333                               opnode, VPR128, VPR64, v4i32, v4i16>;
3334     def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3335                               opnode, VPR128, VPR64, v2i64, v2i32>;
3336   }
3337 }
3338
3339 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
3340 defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
3341                                 int_arm_neon_vqdmull, 1>;
3342 }
3343
3344 multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode, string asmop,
3345                                    string opnode, bit Commutable = 0> {
3346   let isCommutable = Commutable in {
3347     def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3348                                      !cast<PatFrag>(opnode # "_8H"),
3349                                      v4i32, v8i16>;
3350     def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3351                                      !cast<PatFrag>(opnode # "_4S"),
3352                                      v2i64, v4i32>;
3353   }
3354 }
3355
3356 defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2",
3357                                            "NI_qdmull_hi", 1>;
3358
3359 multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode, string asmop,
3360                                      SDPatternOperator opnode> {
3361   def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3362                                    opnode, NI_qdmull_hi_8H,
3363                                    VPR128, v4i32, v8i16>;
3364   def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3365                                    opnode, NI_qdmull_hi_4S,
3366                                    VPR128, v2i64, v4i32>;
3367 }
3368
3369 defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
3370                                              int_arm_neon_vqadds>;
3371 defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
3372                                              int_arm_neon_vqsubs>;
3373
3374 multiclass NeonI_3VDL_v3<bit u, bits<4> opcode, string asmop,
3375                          SDPatternOperator opnode_8h8b,
3376                          SDPatternOperator opnode_1q1d, bit Commutable = 0> {
3377   let isCommutable = Commutable in {
3378     def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3379                               opnode_8h8b, VPR128, VPR64, v8i16, v8i8>;
3380
3381     def _1q1d : NeonI_3VD_2Op<0b0, u, 0b11, opcode, asmop, "1q", "1d",
3382                               opnode_1q1d, VPR128, VPR64, v16i8, v1i64>;
3383   }
3384 }
3385
3386 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in
3387 defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp,
3388                               int_aarch64_neon_vmull_p64, 1>;
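// Note: the _1q1d variant is the 64x64 -> 128-bit carry-less (polynomial)
// multiply, e.g. "pmull v0.1q, v1.1d, v2.1d"; the _8h8b variant is the
// 8x8 -> 16-bit polynomial multiply (registers here are illustrative).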
3389
3390 multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode, string asmop,
3391                                    string opnode, bit Commutable = 0> {
3392   let isCommutable = Commutable in {
3393     def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3394                                       !cast<PatFrag>(opnode # "_16B"),
3395                                       v8i16, v16i8>;
3396
3397     def _1q2d : 
3398       NeonI_3VDiff<0b1, u, 0b11, opcode,
3399                    (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
3400                    asmop # "\t$Rd.1q, $Rn.2d, $Rm.2d",
3401                    [(set (v16i8 VPR128:$Rd),
3402                       (v16i8 (int_aarch64_neon_vmull_p64 
3403                         (v1i64 (scalar_to_vector
3404                           (i64 (vector_extract (v2i64 VPR128:$Rn), 1)))),
3405                         (v1i64 (scalar_to_vector
3406                           (i64 (vector_extract (v2i64 VPR128:$Rm), 1)))))))],
3407                    NoItinerary>,
3408       Sched<[WriteFPMul, ReadFPMul, ReadFPMul]>;
3409   }
3410
3411   def : Pat<(v16i8 (int_aarch64_neon_vmull_p64
3412                       (v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 1))),
3413                       (v1i64 (extract_subvector (v2i64 VPR128:$Rm), (i64 1))))),
3414             (!cast<Instruction>(NAME # "_1q2d") VPR128:$Rn, VPR128:$Rm)>;
3415 }
3416
3417 defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2", "NI_pmull_hi",
3418                                          1>;
3419
3420 // End of implementation for instruction class (3V Diff)
3421
3422 // The following definitions are for the vector load/store multiple N-element
3423 // structure instructions (class SIMD lselem).
3424
3425 // ld1:         load multiple 1-element structures to 1/2/3/4 registers.
3426 // ld2/ld3/ld4: load multiple N-element structures to N registers (N = 2, 3, 4).
3427 //              The structure consists of a sequence of sets of N values.
3428 //              The first element of the structure is placed in the first lane
3429 //              of the first vector, the second element in the first lane
3430 //              of the second vector, and so on.
3431 // E.g. LD1_3V_2S will load 32-bit elements {A, B, C, D, E, F} sequentially into
3432 // the list of three 64-bit vectors {BA, DC, FE}.
3433 // E.g. LD3_2S will load 32-bit elements {A, B, C, D, E, F} into the list of
3434 // three 64-bit vectors {DA, EB, FC}.
3435 // Store instructions mirror the loads, storing multiple structures from N registers.
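// Illustrative syntax for the list loads/stores defined below (registers and
// base address are arbitrary):
//   ld3 {v0.2s, v1.2s, v2.2s}, [x0]   // de-interleave 3-element structures
//   st1 {v0.16b}, [x0]                // store one register, no interleaving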
3436
3437
3438 class NeonI_LDVList<bit q, bits<4> opcode, bits<2> size,
3439                     RegisterOperand VecList, string asmop>
3440   : NeonI_LdStMult<q, 1, opcode, size,
3441                  (outs VecList:$Rt), (ins GPR64xsp:$Rn),
3442                  asmop # "\t$Rt, [$Rn]",
3443                  [],
3444                  NoItinerary>,
3445     Sched<[WriteVecLd, ReadVecLd]> {
3446   let mayLoad = 1;
3447   let neverHasSideEffects = 1;
3448 }
3449
3450 multiclass LDVList_BHSD<bits<4> opcode, string List, string asmop> {
3451   def _8B : NeonI_LDVList<0, opcode, 0b00,
3452                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3453
3454   def _4H : NeonI_LDVList<0, opcode, 0b01,
3455                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3456
3457   def _2S : NeonI_LDVList<0, opcode, 0b10,
3458                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3459
3460   def _16B : NeonI_LDVList<1, opcode, 0b00,
3461                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3462
3463   def _8H : NeonI_LDVList<1, opcode, 0b01,
3464                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3465
3466   def _4S : NeonI_LDVList<1, opcode, 0b10,
3467                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3468
3469   def _2D : NeonI_LDVList<1, opcode, 0b11,
3470                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3471 }
3472
3473 // Load multiple N-element structures to N consecutive registers (N = 1,2,3,4)
3474 defm LD1 : LDVList_BHSD<0b0111, "VOne", "ld1">;
3475 def LD1_1D : NeonI_LDVList<0, 0b0111, 0b11, VOne1D_operand, "ld1">;
3476
3477 defm LD2 : LDVList_BHSD<0b1000, "VPair", "ld2">;
3478
3479 defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">;
3480
3481 defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
3482
3483 // Load multiple 1-element structures to N consecutive registers (N = 2,3,4)
3484 defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
3485 def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
3486
3487 defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
3488 def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
3489
3490 defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
3491 def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
3492
3493 class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
3494                     RegisterOperand VecList, string asmop>
3495   : NeonI_LdStMult<q, 0, opcode, size,
3496                  (outs), (ins GPR64xsp:$Rn, VecList:$Rt),
3497                  asmop # "\t$Rt, [$Rn]",
3498                  [],
3499                  NoItinerary>,
3500     Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
3501   let mayStore = 1;
3502   let neverHasSideEffects = 1;
3503 }
3504
3505 multiclass STVList_BHSD<bits<4> opcode, string List, string asmop> {
3506   def _8B : NeonI_STVList<0, opcode, 0b00,
3507                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3508
3509   def _4H : NeonI_STVList<0, opcode, 0b01,
3510                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3511
3512   def _2S : NeonI_STVList<0, opcode, 0b10,
3513                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3514
3515   def _16B : NeonI_STVList<1, opcode, 0b00,
3516                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3517
3518   def _8H : NeonI_STVList<1, opcode, 0b01,
3519                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3520
3521   def _4S : NeonI_STVList<1, opcode, 0b10,
3522                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3523
3524   def _2D : NeonI_STVList<1, opcode, 0b11,
3525                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3526 }
3527
3528 // Store multiple N-element structures from N registers (N = 1,2,3,4)
3529 defm ST1 : STVList_BHSD<0b0111, "VOne", "st1">;
3530 def ST1_1D : NeonI_STVList<0, 0b0111, 0b11, VOne1D_operand, "st1">;
3531
3532 defm ST2 : STVList_BHSD<0b1000, "VPair", "st2">;
3533
3534 defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">;
3535
3536 defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
3537
3538 // Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
3539 defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
3540 def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
3541
3542 defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
3543 def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
3544
3545 defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
3546 def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
3547
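// The patterns below select plain (unstructured) vector loads and stores to
// the one-register LD1/ST1 forms; e.g. (illustrative) an IR-level
// "load <2 x i64>" becomes "ld1 {v0.2d}, [x0]".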
3548 def : Pat<(v2f64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
3549 def : Pat<(v2i64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
3550
3551 def : Pat<(v4f32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
3552 def : Pat<(v4i32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
3553
3554 def : Pat<(v8i16 (load GPR64xsp:$addr)), (LD1_8H GPR64xsp:$addr)>;
3555 def : Pat<(v16i8 (load GPR64xsp:$addr)), (LD1_16B GPR64xsp:$addr)>;
3556
3557 def : Pat<(v1f64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
3558 def : Pat<(v1i64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
3559
3560 def : Pat<(v2f32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
3561 def : Pat<(v2i32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
3562
3563 def : Pat<(v4i16 (load GPR64xsp:$addr)), (LD1_4H GPR64xsp:$addr)>;
3564 def : Pat<(v8i8 (load GPR64xsp:$addr)), (LD1_8B GPR64xsp:$addr)>;
3565
3566 def : Pat<(store (v2i64 VPR128:$value), GPR64xsp:$addr),
3567           (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
3568 def : Pat<(store (v2f64 VPR128:$value), GPR64xsp:$addr),
3569           (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
3570
3571 def : Pat<(store (v4i32 VPR128:$value), GPR64xsp:$addr),
3572           (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
3573 def : Pat<(store (v4f32 VPR128:$value), GPR64xsp:$addr),
3574           (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
3575
3576 def : Pat<(store (v8i16 VPR128:$value), GPR64xsp:$addr),
3577           (ST1_8H GPR64xsp:$addr, VPR128:$value)>;
3578 def : Pat<(store (v16i8 VPR128:$value), GPR64xsp:$addr),
3579           (ST1_16B GPR64xsp:$addr, VPR128:$value)>;
3580
3581 def : Pat<(store (v1i64 VPR64:$value), GPR64xsp:$addr),
3582           (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
3583 def : Pat<(store (v1f64 VPR64:$value), GPR64xsp:$addr),
3584           (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
3585
3586 def : Pat<(store (v2i32 VPR64:$value), GPR64xsp:$addr),
3587           (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
3588 def : Pat<(store (v2f32 VPR64:$value), GPR64xsp:$addr),
3589           (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
3590
3591 def : Pat<(store (v4i16 VPR64:$value), GPR64xsp:$addr),
3592           (ST1_4H GPR64xsp:$addr, VPR64:$value)>;
3593 def : Pat<(store (v8i8 VPR64:$value), GPR64xsp:$addr),
3594           (ST1_8B GPR64xsp:$addr, VPR64:$value)>;
3595
3596 // Match load/store of v1i8/v1i16/v1i32 type to FPR8/FPR16/FPR32 load/store.
3597 // FIXME: v1i8, v1i16 and v1i32 are legal types for now; if they become
3598 // illegal, these patterns will no longer be needed.
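// For example (a sketch), a (v1i8 (load ...)) is selected to a plain FPR8
// load, i.e. "ldr b0, [x0]", rather than going through the vector LD1 forms.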
3599 def : Pat<(v1i8 (load GPR64xsp:$addr)), (LSFP8_LDR $addr, 0)>;
3600 def : Pat<(v1i16 (load GPR64xsp:$addr)), (LSFP16_LDR $addr, 0)>;
3601 def : Pat<(v1i32 (load GPR64xsp:$addr)), (LSFP32_LDR $addr, 0)>;
3602
3603 def : Pat<(store (v1i8 FPR8:$value), GPR64xsp:$addr),
3604           (LSFP8_STR $value, $addr, 0)>;
3605 def : Pat<(store (v1i16 FPR16:$value), GPR64xsp:$addr),
3606           (LSFP16_STR $value, $addr, 0)>;
3607 def : Pat<(store (v1i32 FPR32:$value), GPR64xsp:$addr),
3608           (LSFP32_STR $value, $addr, 0)>;
3609
3610
3611 // End of vector load/store multiple N-element structure (class SIMD lselem)
3612
3613 // The following are the post-index vector load/store multiple N-element
3614 // structure instructions (class SIMD lselem-post).
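//
// A hedged illustration of the two post-index (writeback) addressing forms
// defined below; the immediate form must name exactly the number of bytes
// transferred, which is what the "exact" operand classes enforce:
//   ld1 {v0.16b}, [x0], #16   // fixed form:    x0 += 16 after the load
//   ld1 {v0.16b}, [x0], x1    // register form: x0 += x1 after the load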
3615 def exact1_asmoperand : AsmOperandClass {
3616   let Name = "Exact1";
3617   let PredicateMethod = "isExactImm<1>";
3618   let RenderMethod = "addImmOperands";
3619 }
3620 def uimm_exact1 : Operand<i32>, ImmLeaf<i32, [{return Imm == 1;}]> {
3621   let ParserMatchClass = exact1_asmoperand;
3622 }
3623
3624 def exact2_asmoperand : AsmOperandClass {
3625   let Name = "Exact2";
3626   let PredicateMethod = "isExactImm<2>";
3627   let RenderMethod = "addImmOperands";
3628 }
3629 def uimm_exact2 : Operand<i32>, ImmLeaf<i32, [{return Imm == 2;}]> {
3630   let ParserMatchClass = exact2_asmoperand;
3631 }
3632
3633 def exact3_asmoperand : AsmOperandClass {
3634   let Name = "Exact3";
3635   let PredicateMethod = "isExactImm<3>";
3636   let RenderMethod = "addImmOperands";
3637 }
3638 def uimm_exact3 : Operand<i32>, ImmLeaf<i32, [{return Imm == 3;}]> {
3639   let ParserMatchClass = exact3_asmoperand;
3640 }
3641
3642 def exact4_asmoperand : AsmOperandClass {
3643   let Name = "Exact4";
3644   let PredicateMethod = "isExactImm<4>";
3645   let RenderMethod = "addImmOperands";
3646 }
3647 def uimm_exact4 : Operand<i32>, ImmLeaf<i32, [{return Imm == 4;}]> {
3648   let ParserMatchClass = exact4_asmoperand;
3649 }
3650
3651 def exact6_asmoperand : AsmOperandClass {
3652   let Name = "Exact6";
3653   let PredicateMethod = "isExactImm<6>";
3654   let RenderMethod = "addImmOperands";
3655 }
3656 def uimm_exact6 : Operand<i32>, ImmLeaf<i32, [{return Imm == 6;}]> {
3657   let ParserMatchClass = exact6_asmoperand;
3658 }
3659
3660 def exact8_asmoperand : AsmOperandClass {
3661   let Name = "Exact8";
3662   let PredicateMethod = "isExactImm<8>";
3663   let RenderMethod = "addImmOperands";
3664 }
3665 def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
3666   let ParserMatchClass = exact8_asmoperand;
3667 }
3668
3669 def exact12_asmoperand : AsmOperandClass {
3670   let Name = "Exact12";
3671   let PredicateMethod = "isExactImm<12>";
3672   let RenderMethod = "addImmOperands";
3673 }
3674 def uimm_exact12 : Operand<i32>, ImmLeaf<i32, [{return Imm == 12;}]> {
3675   let ParserMatchClass = exact12_asmoperand;
3676 }
3677
3678 def exact16_asmoperand : AsmOperandClass {
3679   let Name = "Exact16";
3680   let PredicateMethod = "isExactImm<16>";
3681   let RenderMethod = "addImmOperands";
3682 }
3683 def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
3684   let ParserMatchClass = exact16_asmoperand;
3685 }
3686
3687 def exact24_asmoperand : AsmOperandClass {
3688   let Name = "Exact24";
3689   let PredicateMethod = "isExactImm<24>";
3690   let RenderMethod = "addImmOperands";
3691 }
3692 def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
3693   let ParserMatchClass = exact24_asmoperand;
3694 }
3695
3696 def exact32_asmoperand : AsmOperandClass {
3697   let Name = "Exact32";
3698   let PredicateMethod = "isExactImm<32>";
3699   let RenderMethod = "addImmOperands";
3700 }
3701 def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
3702   let ParserMatchClass = exact32_asmoperand;
3703 }
3704
3705 def exact48_asmoperand : AsmOperandClass {
3706   let Name = "Exact48";
3707   let PredicateMethod = "isExactImm<48>";
3708   let RenderMethod = "addImmOperands";
3709 }
3710 def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
3711   let ParserMatchClass = exact48_asmoperand;
3712 }
3713
3714 def exact64_asmoperand : AsmOperandClass {
3715   let Name = "Exact64";
3716   let PredicateMethod = "isExactImm<64>";
3717   let RenderMethod = "addImmOperands";
3718 }
3719 def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
3720   let ParserMatchClass = exact64_asmoperand;
3721 }
3722
3723 multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
3724                            RegisterOperand VecList, Operand ImmTy,
3725                            string asmop> {
3726   let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1,
3727       DecoderMethod = "DecodeVLDSTPostInstruction" in {
3728     def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
3729                      (outs VecList:$Rt, GPR64xsp:$wb),
3730                      (ins GPR64xsp:$Rn, ImmTy:$amt),
3731                      asmop # "\t$Rt, [$Rn], $amt",
3732                      [],
3733                      NoItinerary>,
3734                  Sched<[WriteVecLd, WriteVecLd, ReadVecLd]> {
3735       let Rm = 0b11111;
3736     }
3737
3738     def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
3739                         (outs VecList:$Rt, GPR64xsp:$wb),
3740                         (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
3741                         asmop # "\t$Rt, [$Rn], $Rm",
3742                         [],
3743                         NoItinerary>,
3744                     Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]>;
3745   }
3746 }
3747
3748 multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
3749     Operand ImmTy2, string asmop> {
3750   defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
3751                               !cast<RegisterOperand>(List # "8B_operand"),
3752                               ImmTy, asmop>;
3753
3754   defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
3755                               !cast<RegisterOperand>(List # "4H_operand"),
3756                               ImmTy, asmop>;
3757
3758   defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
3759                               !cast<RegisterOperand>(List # "2S_operand"),
3760                               ImmTy, asmop>;
3761
3762   defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
3763                                !cast<RegisterOperand>(List # "16B_operand"),
3764                                ImmTy2, asmop>;
3765
3766   defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
3767                               !cast<RegisterOperand>(List # "8H_operand"),
3768                               ImmTy2, asmop>;
3769
3770   defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
3771                               !cast<RegisterOperand>(List # "4S_operand"),
3772                               ImmTy2, asmop>;
3773
3774   defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
3775                               !cast<RegisterOperand>(List # "2D_operand"),
3776                               ImmTy2, asmop>;
3777 }
3778
3779 // Post-index load multiple N-element structures to N registers (N = 1,2,3,4)
3780 defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
3781 defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
3782                                  "ld1">;
3783
3784 defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "ld2">;
3785
3786 defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
3787                              "ld3">;
3788
3789 defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">;
3790
3791 // Post-index load multiple 1-element structures to N consecutive registers
3792 // (N = 2,3,4)
3793 defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
3794                                "ld1">;
3795 defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
3796                                    uimm_exact16, "ld1">;
3797
3798 defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
3799                                "ld1">;
3800 defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
3801                                    uimm_exact24, "ld1">;
3802
3803 defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
3804                                 "ld1">;
3805 defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
3806                                    uimm_exact32, "ld1">;
3807
3808 multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
3809                             RegisterOperand VecList, Operand ImmTy,
3810                             string asmop> {
3811   let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
3812       DecoderMethod = "DecodeVLDSTPostInstruction" in {
3813     def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
3814                      (outs GPR64xsp:$wb),
3815                      (ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
3816                      asmop # "\t$Rt, [$Rn], $amt",
3817                      [],
3818                      NoItinerary>,
3819                  Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
3820       let Rm = 0b11111;
3821     }
3822
3823     def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
3824                       (outs GPR64xsp:$wb),
3825                       (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt),
3826                       asmop # "\t$Rt, [$Rn], $Rm",
3827                       [],
3828                       NoItinerary>,
3829                     Sched<[WriteVecSt, ReadVecSt, ReadVecSt, ReadVecSt]>;
3830   }
3831 }
3832
3833 multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
3834                            Operand ImmTy2, string asmop> {
3835   defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
3836                  !cast<RegisterOperand>(List # "8B_operand"), ImmTy, asmop>;
3837
3838   defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
3839                               !cast<RegisterOperand>(List # "4H_operand"),
3840                               ImmTy, asmop>;
3841
3842   defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
3843                               !cast<RegisterOperand>(List # "2S_operand"),
3844                               ImmTy, asmop>;
3845
3846   defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
3847                                !cast<RegisterOperand>(List # "16B_operand"),
3848                                ImmTy2, asmop>;
3849
3850   defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
3851                               !cast<RegisterOperand>(List # "8H_operand"),
3852                               ImmTy2, asmop>;
3853
3854   defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
3855                               !cast<RegisterOperand>(List # "4S_operand"),
3856                               ImmTy2, asmop>;
3857
3858   defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
3859                               !cast<RegisterOperand>(List # "2D_operand"),
3860                               ImmTy2, asmop>;
3861 }
3862
3863 // Post-index store multiple N-element structures from N registers (N = 1,2,3,4)
3864 defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
3865 defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
3866                                  "st1">;
3867
3868 defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "st2">;
3869
3870 defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
3871                              "st3">;
3872
3873 defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">;
3874
3875 // Post-index store multiple 1-element structures from N consecutive registers
3876 // (N = 2,3,4)
3877 defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
3878                                "st1">;
3879 defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
3880                                    uimm_exact16, "st1">;
3881
3882 defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
3883                                "st1">;
3884 defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
3885                                    uimm_exact24, "st1">;
3886
3887 defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
3888                                "st1">;
3889 defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
3890                                    uimm_exact32, "st1">;
3891
3892 // End of post-index vector load/store multiple N-element structure
3893 // (class SIMD lselem-post)
3894
3895 // The following are the vector load/store single N-element structure
3896 // instructions (class SIMD lsone).
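//
// Illustrative assembly for this class (register choices arbitrary):
//   ld1r {v0.4s}, [x0]          // replicate one element to all lanes of v0
//   ld2  {v0.s, v1.s}[1], [x0]  // load one 2-element structure into lane 1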
3897 def neon_uimm0_bare : Operand<i64>,
3898                         ImmLeaf<i64, [{return Imm == 0;}]> {
3899   let ParserMatchClass = neon_uimm0_asmoperand;
3900   let PrintMethod = "printUImmBareOperand";
3901 }
3902
3903 def neon_uimm1_bare : Operand<i64>,
3904                         ImmLeaf<i64, [{return Imm < 2;}]> {
3905   let ParserMatchClass = neon_uimm1_asmoperand;
3906   let PrintMethod = "printUImmBareOperand";
3907 }
3908
3909 def neon_uimm2_bare : Operand<i64>,
3910                         ImmLeaf<i64, [{return Imm < 4;}]> {
3911   let ParserMatchClass = neon_uimm2_asmoperand;
3912   let PrintMethod = "printUImmBareOperand";
3913 }
3914
3915 def neon_uimm3_bare : Operand<i64>,
3916                         ImmLeaf<i64, [{return Imm < 8;}]> {
3917   let ParserMatchClass = uimm3_asmoperand;
3918   let PrintMethod = "printUImmBareOperand";
3919 }
3920
3921 def neon_uimm4_bare : Operand<i64>,
3922                         ImmLeaf<i64, [{return Imm < 16;}]> {
3923   let ParserMatchClass = uimm4_asmoperand;
3924   let PrintMethod = "printUImmBareOperand";
3925 }
3926
3927 class NeonI_LDN_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
3928                     RegisterOperand VecList, string asmop>
3929     : NeonI_LdOne_Dup<q, r, opcode, size,
3930                       (outs VecList:$Rt), (ins GPR64xsp:$Rn),
3931                       asmop # "\t$Rt, [$Rn]",
3932                       [],
3933                       NoItinerary>,
3934       Sched<[WriteVecLd, ReadVecLd]> {
3935   let mayLoad = 1;
3936   let neverHasSideEffects = 1;
3937 }
3938
3939 multiclass LDN_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop> {
3940   def _8B : NeonI_LDN_Dup<0, r, opcode, 0b00,
3941                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3942
3943   def _4H : NeonI_LDN_Dup<0, r, opcode, 0b01,
3944                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3945
3946   def _2S : NeonI_LDN_Dup<0, r, opcode, 0b10,
3947                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3948
3949   def _1D : NeonI_LDN_Dup<0, r, opcode, 0b11,
3950                           !cast<RegisterOperand>(List # "1D_operand"), asmop>;
3951
3952   def _16B : NeonI_LDN_Dup<1, r, opcode, 0b00,
3953                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3954
3955   def _8H : NeonI_LDN_Dup<1, r, opcode, 0b01,
3956                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3957
3958   def _4S : NeonI_LDN_Dup<1, r, opcode, 0b10,
3959                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3960
3961   def _2D : NeonI_LDN_Dup<1, r, opcode, 0b11,
3962                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3963 }
3964
3965 // Load single 1-element structure to all lanes of 1 register
3966 defm LD1R : LDN_Dup_BHSD<0b0, 0b110, "VOne", "ld1r">;
3967
3968 // Load single N-element structure to all lanes of N consecutive
3969 // registers (N = 2,3,4)
3970 defm LD2R : LDN_Dup_BHSD<0b1, 0b110, "VPair", "ld2r">;
3971 defm LD3R : LDN_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r">;
3972 defm LD4R : LDN_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r">;
3973
3974
3975 class LD1R_pattern <ValueType VTy, ValueType DTy, PatFrag LoadOp,
3976                     Instruction INST>
3977     : Pat<(VTy (Neon_vdup (DTy (LoadOp GPR64xsp:$Rn)))),
3978           (VTy (INST GPR64xsp:$Rn))>;
3979
3980 // Match all LD1R instructions
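// For example (a sketch of what these patterns achieve), a splatted scalar
// load such as (v4f32 (Neon_vdup (f32 (load addr)))) is selected directly to
// "ld1r {v0.4s}, [x0]" rather than a separate ldr followed by a dup.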
3981 def : LD1R_pattern<v8i8, i32, extloadi8, LD1R_8B>;
3982
3983 def : LD1R_pattern<v16i8, i32, extloadi8, LD1R_16B>;
3984
3985 def : LD1R_pattern<v4i16, i32, extloadi16, LD1R_4H>;
3986
3987 def : LD1R_pattern<v8i16, i32, extloadi16, LD1R_8H>;
3988
3989 def : LD1R_pattern<v2i32, i32, load, LD1R_2S>;
3990 def : LD1R_pattern<v2f32, f32, load, LD1R_2S>;
3991
3992 def : LD1R_pattern<v4i32, i32, load, LD1R_4S>;
3993 def : LD1R_pattern<v4f32, f32, load, LD1R_4S>;
3994
3995 def : LD1R_pattern<v2i64, i64, load, LD1R_2D>;
3996 def : LD1R_pattern<v2f64, f64, load, LD1R_2D>;
3997
3998 class LD1R_pattern_v1 <ValueType VTy, ValueType DTy, PatFrag LoadOp,
3999                        Instruction INST>
4000   : Pat<(VTy (scalar_to_vector (DTy (LoadOp GPR64xsp:$Rn)))),
4001         (VTy (INST GPR64xsp:$Rn))>;
4002
4003 def : LD1R_pattern_v1<v1i64, i64, load, LD1R_1D>;
4004 def : LD1R_pattern_v1<v1f64, f64, load, LD1R_1D>;
4005
4006 multiclass VectorList_Bare_BHSD<string PREFIX, int Count,
4007                                 RegisterClass RegList> {
4008   defm B : VectorList_operands<PREFIX, "B", Count, RegList>;
4009   defm H : VectorList_operands<PREFIX, "H", Count, RegList>;
4010   defm S : VectorList_operands<PREFIX, "S", Count, RegList>;
4011   defm D : VectorList_operands<PREFIX, "D", Count, RegList>;
4012 }
4013
4014 // Special vector list operands of 128-bit vectors with a bare layout,
4015 // i.e. printed with only ".b", ".h", ".s" or ".d".
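// For example (illustrative), the lane-indexed loads/stores further down are
// printed as "ld2 {v0.s, v1.s}[1], [x0]" rather than with a full ".4s" layout.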
4016 defm VOne : VectorList_Bare_BHSD<"VOne", 1, FPR128>;
4017 defm VPair : VectorList_Bare_BHSD<"VPair", 2, QPair>;
4018 defm VTriple : VectorList_Bare_BHSD<"VTriple", 3, QTriple>;
4019 defm VQuad : VectorList_Bare_BHSD<"VQuad", 4, QQuad>;
4020
4021 class NeonI_LDN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4022                      Operand ImmOp, string asmop>
4023     : NeonI_LdStOne_Lane<1, r, op2_1, op0,
4024                          (outs VList:$Rt),
4025                          (ins GPR64xsp:$Rn, VList:$src, ImmOp:$lane),
4026                          asmop # "\t$Rt[$lane], [$Rn]",
4027                          [],
4028                          NoItinerary>,
4029       Sched<[WriteVecLd, ReadVecLd, ReadVecLd]> {
4030   let mayLoad = 1;
4031   let neverHasSideEffects = 1;
4032   let hasExtraDefRegAllocReq = 1;
4033   let Constraints = "$src = $Rt";
4034 }
4035
4036 multiclass LDN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
4037   def _B : NeonI_LDN_Lane<r, 0b00, op0,
4038                           !cast<RegisterOperand>(List # "B_operand"),
4039                           neon_uimm4_bare, asmop> {
4040     let Inst{12-10} = lane{2-0};
4041     let Inst{30} = lane{3};
4042   }
4043
4044   def _H : NeonI_LDN_Lane<r, 0b01, op0,
4045                           !cast<RegisterOperand>(List # "H_operand"),
4046                           neon_uimm3_bare, asmop> {
4047     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4048     let Inst{30} = lane{2};
4049   }
4050
4051   def _S : NeonI_LDN_Lane<r, 0b10, op0,
4052                           !cast<RegisterOperand>(List # "S_operand"),
4053                           neon_uimm2_bare, asmop> {
4054     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4055     let Inst{30} = lane{1};
4056   }
4057
4058   def _D : NeonI_LDN_Lane<r, 0b10, op0,
4059                           !cast<RegisterOperand>(List # "D_operand"),
4060                           neon_uimm1_bare, asmop> {
4061     let Inst{12-10} = 0b001;
4062     let Inst{30} = lane{0};
4063   }
4064 }
4065
4066 // Load single 1-element structure to one lane of 1 register.
4067 defm LD1LN : LDN_Lane_BHSD<0b0, 0b0, "VOne", "ld1">;
4068
4069 // Load single N-element structure to one lane of N consecutive registers
4070 // (N = 2,3,4)
4071 defm LD2LN : LDN_Lane_BHSD<0b1, 0b0, "VPair", "ld2">;
4072 defm LD3LN : LDN_Lane_BHSD<0b0, 0b1, "VTriple", "ld3">;
4073 defm LD4LN : LDN_Lane_BHSD<0b1, 0b1, "VQuad", "ld4">;
4074
4075 multiclass LD1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
4076                           Operand ImmOp, Operand ImmOp2, PatFrag LoadOp,
4077                           Instruction INST> {
4078   def : Pat<(VTy (vector_insert (VTy VPR64:$src),
4079                      (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp:$lane))),
4080             (VTy (EXTRACT_SUBREG
4081                      (INST GPR64xsp:$Rn,
4082                            (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
4083                            ImmOp:$lane),
4084                      sub_64))>;
4085
4086   def : Pat<(VTy2 (vector_insert (VTy2 VPR128:$src),
4087                       (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp2:$lane))),
4088             (VTy2 (INST GPR64xsp:$Rn, VPR128:$src, ImmOp2:$lane))>;
4089 }
4090
4091 // Match all LD1LN instructions
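// For example (illustrative), inserting a loaded scalar into lane 1 of a
// v4f32, i.e. (vector_insert VPR128:$src, (load addr), 1), is selected to
// "ld1 {v0.s}[1], [x0]", leaving the other lanes of $src untouched.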
4092 defm : LD1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
4093                       extloadi8, LD1LN_B>;
4094
4095 defm : LD1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
4096                       extloadi16, LD1LN_H>;
4097
4098 defm : LD1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
4099                       load, LD1LN_S>;
4100 defm : LD1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
4101                       load, LD1LN_S>;
4102
4103 defm : LD1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
4104                       load, LD1LN_D>;
4105 defm : LD1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
4106                       load, LD1LN_D>;
4107
4108 class NeonI_STN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4109                      Operand ImmOp, string asmop>
4110     : NeonI_LdStOne_Lane<0, r, op2_1, op0,
4111                          (outs), (ins GPR64xsp:$Rn, VList:$Rt, ImmOp:$lane),
4112                          asmop # "\t$Rt[$lane], [$Rn]",
4113                          [],
4114                          NoItinerary>,
4115       Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
4116   let mayStore = 1;
4117   let neverHasSideEffects = 1;
4118   let hasExtraDefRegAllocReq = 1;
4119 }
4120
4121 multiclass STN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
4122   def _B : NeonI_STN_Lane<r, 0b00, op0,
4123                           !cast<RegisterOperand>(List # "B_operand"),
4124                           neon_uimm4_bare, asmop> {
4125     let Inst{12-10} = lane{2-0};
4126     let Inst{30} = lane{3};
4127   }
4128
4129   def _H : NeonI_STN_Lane<r, 0b01, op0,
4130                           !cast<RegisterOperand>(List # "H_operand"),
4131                           neon_uimm3_bare, asmop> {
4132     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4133     let Inst{30} = lane{2};
4134   }
4135
4136   def _S : NeonI_STN_Lane<r, 0b10, op0,
4137                           !cast<RegisterOperand>(List # "S_operand"),
4138                            neon_uimm2_bare, asmop> {
4139     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4140     let Inst{30} = lane{1};
4141   }
4142
4143   def _D : NeonI_STN_Lane<r, 0b10, op0,
4144                           !cast<RegisterOperand>(List # "D_operand"),
4145                           neon_uimm1_bare, asmop> {
4146     let Inst{12-10} = 0b001;
4147     let Inst{30} = lane{0};
4148   }
4149 }
4150
4151 // Store single 1-element structure from one lane of 1 register.
4152 defm ST1LN : STN_Lane_BHSD<0b0, 0b0, "VOne", "st1">;
4153
4154 // Store single N-element structure from one lane of N consecutive registers
4155 // (N = 2,3,4)
4156 defm ST2LN : STN_Lane_BHSD<0b1, 0b0, "VPair", "st2">;
4157 defm ST3LN : STN_Lane_BHSD<0b0, 0b1, "VTriple", "st3">;
4158 defm ST4LN : STN_Lane_BHSD<0b1, 0b1, "VQuad", "st4">;
4159
4160 multiclass ST1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
4161                           Operand ImmOp, Operand ImmOp2, PatFrag StoreOp,
4162                           Instruction INST> {
4163   def : Pat<(StoreOp (DTy (vector_extract (VTy VPR64:$Rt), ImmOp:$lane)),
4164                      GPR64xsp:$Rn),
4165             (INST GPR64xsp:$Rn,
4166                   (SUBREG_TO_REG (i64 0), VPR64:$Rt, sub_64),
4167                   ImmOp:$lane)>;
4168
4169   def : Pat<(StoreOp (DTy (vector_extract (VTy2 VPR128:$Rt), ImmOp2:$lane)),
4170                      GPR64xsp:$Rn),
4171             (INST GPR64xsp:$Rn, VPR128:$Rt, ImmOp2:$lane)>;
4172 }
4173
4174 // Match all ST1LN instructions
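// For example (illustrative), storing a single extracted lane, i.e.
// (store (vector_extract (v4f32 VPR128:$Rt), 1), addr), is selected to
// "st1 {v0.s}[1], [x0]" rather than an explicit lane move followed by str.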
4175 defm : ST1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
4176                       truncstorei8, ST1LN_B>;
4177
4178 defm : ST1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
4179                       truncstorei16, ST1LN_H>;
4180
4181 defm : ST1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
4182                       store, ST1LN_S>;
4183 defm : ST1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
4184                       store, ST1LN_S>;
4185
4186 defm : ST1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
4187                       store, ST1LN_D>;
4188 defm : ST1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
4189                       store, ST1LN_D>;
4190
4191 // End of vector load/store single N-element structure (class SIMD lsone).
4192
4193
4194 // The following are post-index load/store single N-element instructions
4195 // (class SIMD lsone-post)
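//
// Illustrative post-index forms of the single-structure instructions below
// (as before, the fixed immediate equals the number of bytes transferred):
//   ld1r {v0.4s}, [x0], #4           // replicate, then x0 += 4
//   st2  {v0.s, v1.s}[1], [x0], x1   // store one structure, then x0 += x1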
4196
4197 multiclass NeonI_LDN_WB_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
4198                             RegisterOperand VecList, Operand ImmTy,
4199                             string asmop> {
4200   let mayLoad = 1, neverHasSideEffects = 1, Constraints = "$wb = $Rn",
4201   DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
4202     def _fixed : NeonI_LdOne_Dup_Post<q, r, opcode, size,
4203                       (outs VecList:$Rt, GPR64xsp:$wb),
4204                       (ins GPR64xsp:$Rn, ImmTy:$amt),
4205                       asmop # "\t$Rt, [$Rn], $amt",
4206                       [],
4207                       NoItinerary>,
4208                  Sched<[WriteVecLd, WriteVecLd, ReadVecLd]> {
4209       let Rm = 0b11111;
4210     }
4211
4212     def _register : NeonI_LdOne_Dup_Post<q, r, opcode, size,
4213                       (outs VecList:$Rt, GPR64xsp:$wb),
4214                       (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
4215                       asmop # "\t$Rt, [$Rn], $Rm",
4216                       [],
4217                       NoItinerary>,
4218                     Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]>;
4219   }
4220 }
4221
4222 multiclass LDWB_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop,
4223                          Operand uimm_b, Operand uimm_h,
4224                          Operand uimm_s, Operand uimm_d> {
4225   defm _8B : NeonI_LDN_WB_Dup<0, r, opcode, 0b00,
4226                               !cast<RegisterOperand>(List # "8B_operand"),
4227                               uimm_b, asmop>;
4228
4229   defm _4H : NeonI_LDN_WB_Dup<0, r, opcode, 0b01,
4230                               !cast<RegisterOperand>(List # "4H_operand"),
4231                               uimm_h, asmop>;
4232
4233   defm _2S : NeonI_LDN_WB_Dup<0, r, opcode, 0b10,
4234                               !cast<RegisterOperand>(List # "2S_operand"),
4235                               uimm_s, asmop>;
4236
4237   defm _1D : NeonI_LDN_WB_Dup<0, r, opcode, 0b11,
4238                               !cast<RegisterOperand>(List # "1D_operand"),
4239                               uimm_d, asmop>;
4240
4241   defm _16B : NeonI_LDN_WB_Dup<1, r, opcode, 0b00,
4242                                !cast<RegisterOperand>(List # "16B_operand"),
4243                                uimm_b, asmop>;
4244
4245   defm _8H : NeonI_LDN_WB_Dup<1, r, opcode, 0b01,
4246                               !cast<RegisterOperand>(List # "8H_operand"),
4247                               uimm_h, asmop>;
4248
4249   defm _4S : NeonI_LDN_WB_Dup<1, r, opcode, 0b10,
4250                               !cast<RegisterOperand>(List # "4S_operand"),
4251                               uimm_s, asmop>;
4252
4253   defm _2D : NeonI_LDN_WB_Dup<1, r, opcode, 0b11,
4254                               !cast<RegisterOperand>(List # "2D_operand"),
4255                               uimm_d, asmop>;
4256 }
4257
4258 // Post-index load single 1-element structure to all lanes of 1 register
4259 defm LD1R_WB : LDWB_Dup_BHSD<0b0, 0b110, "VOne", "ld1r", uimm_exact1,
4260                              uimm_exact2, uimm_exact4, uimm_exact8>;
4261
4262 // Post-index load single N-element structure to all lanes of N consecutive
4263 // registers (N = 2,3,4)
4264 defm LD2R_WB : LDWB_Dup_BHSD<0b1, 0b110, "VPair", "ld2r", uimm_exact2,
4265                              uimm_exact4, uimm_exact8, uimm_exact16>;
4266 defm LD3R_WB : LDWB_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r", uimm_exact3,
4267                              uimm_exact6, uimm_exact12, uimm_exact24>;
4268 defm LD4R_WB : LDWB_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r", uimm_exact4,
4269                              uimm_exact8, uimm_exact16, uimm_exact32>;
4270
4271 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1,
4272     Constraints = "$Rn = $wb, $Rt = $src",
4273     DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
4274   class LDN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4275                                 Operand ImmTy, Operand ImmOp, string asmop>
4276       : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
4277                                 (outs VList:$Rt, GPR64xsp:$wb),
4278                                 (ins GPR64xsp:$Rn, ImmTy:$amt,
4279                                     VList:$src, ImmOp:$lane),
4280                                 asmop # "\t$Rt[$lane], [$Rn], $amt",
4281                                 [],
4282                                 NoItinerary>,
4283         Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]> {
4284     let Rm = 0b11111;
4285   }
4286
4287   class LDN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4288                                  Operand ImmTy, Operand ImmOp, string asmop>
4289       : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
4290                                 (outs VList:$Rt, GPR64xsp:$wb),
4291                                 (ins GPR64xsp:$Rn, GPR64noxzr:$Rm,
4292                                     VList:$src, ImmOp:$lane),
4293                                 asmop # "\t$Rt[$lane], [$Rn], $Rm",
4294                                 [],
4295                                 NoItinerary>,
4296         Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd, ReadVecLd]>;
4297 }
4298
4299 multiclass LD_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
4300                            Operand uimm_b, Operand uimm_h,
4301                            Operand uimm_s, Operand uimm_d> {
4302   def _B_fixed : LDN_WBFx_Lane<r, 0b00, op0,
4303                                !cast<RegisterOperand>(List # "B_operand"),
4304                                uimm_b, neon_uimm4_bare, asmop> {
4305     let Inst{12-10} = lane{2-0};
4306     let Inst{30} = lane{3};
4307   }
4308
4309   def _B_register : LDN_WBReg_Lane<r, 0b00, op0,
4310                                    !cast<RegisterOperand>(List # "B_operand"),
4311                                    uimm_b, neon_uimm4_bare, asmop> {
4312     let Inst{12-10} = lane{2-0};
4313     let Inst{30} = lane{3};
4314   }
4315
4316   def _H_fixed : LDN_WBFx_Lane<r, 0b01, op0,
4317                                !cast<RegisterOperand>(List # "H_operand"),
4318                                uimm_h, neon_uimm3_bare, asmop> {
4319     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4320     let Inst{30} = lane{2};
4321   }
4322
4323   def _H_register : LDN_WBReg_Lane<r, 0b01, op0,
4324                                    !cast<RegisterOperand>(List # "H_operand"),
4325                                    uimm_h, neon_uimm3_bare, asmop> {
4326     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4327     let Inst{30} = lane{2};
4328   }
4329
4330   def _S_fixed : LDN_WBFx_Lane<r, 0b10, op0,
4331                                !cast<RegisterOperand>(List # "S_operand"),
4332                                uimm_s, neon_uimm2_bare, asmop> {
4333     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4334     let Inst{30} = lane{1};
4335   }
4336
4337   def _S_register : LDN_WBReg_Lane<r, 0b10, op0,
4338                                    !cast<RegisterOperand>(List # "S_operand"),
4339                                    uimm_s, neon_uimm2_bare, asmop> {
4340     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4341     let Inst{30} = lane{1};
4342   }
4343
4344   def _D_fixed : LDN_WBFx_Lane<r, 0b10, op0,
4345                                !cast<RegisterOperand>(List # "D_operand"),
4346                                uimm_d, neon_uimm1_bare, asmop> {
4347     let Inst{12-10} = 0b001;
4348     let Inst{30} = lane{0};
4349   }
4350
4351   def _D_register : LDN_WBReg_Lane<r, 0b10, op0,
4352                                    !cast<RegisterOperand>(List # "D_operand"),
4353                                    uimm_d, neon_uimm1_bare, asmop> {
4354     let Inst{12-10} = 0b001;
4355     let Inst{30} = lane{0};
4356   }
4357 }
4358
4359 // Post-index load single 1-element structure to one lane of 1 register.
4360 defm LD1LN_WB : LD_Lane_WB_BHSD<0b0, 0b0, "VOne", "ld1", uimm_exact1,
4361                                 uimm_exact2, uimm_exact4, uimm_exact8>;
4362
4363 // Post-index load single N-element structure to one lane of N consecutive
4364 // registers
4365 // (N = 2,3,4)
4366 defm LD2LN_WB : LD_Lane_WB_BHSD<0b1, 0b0, "VPair", "ld2", uimm_exact2,
4367                                 uimm_exact4, uimm_exact8, uimm_exact16>;
4368 defm LD3LN_WB : LD_Lane_WB_BHSD<0b0, 0b1, "VTriple", "ld3", uimm_exact3,
4369                                 uimm_exact6, uimm_exact12, uimm_exact24>;
4370 defm LD4LN_WB : LD_Lane_WB_BHSD<0b1, 0b1, "VQuad", "ld4", uimm_exact4,
4371                                 uimm_exact8, uimm_exact16, uimm_exact32>;
4372
4373 let mayStore = 1, neverHasSideEffects = 1,
4374     hasExtraDefRegAllocReq = 1, Constraints = "$Rn = $wb",
4375     DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
4376   class STN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4377                       Operand ImmTy, Operand ImmOp, string asmop>
4378       : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
4379                                 (outs GPR64xsp:$wb),
4380                                 (ins GPR64xsp:$Rn, ImmTy:$amt,
4381                                     VList:$Rt, ImmOp:$lane),
4382                                 asmop # "\t$Rt[$lane], [$Rn], $amt",
4383                                 [],
4384                                 NoItinerary>,
4385         Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
4386     let Rm = 0b11111;
4387   }
4388
4389   class STN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4390                        Operand ImmTy, Operand ImmOp, string asmop>
4391       : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
4392                                 (outs GPR64xsp:$wb),
4393                                 (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VList:$Rt,
4394                                     ImmOp:$lane),
4395                                 asmop # "\t$Rt[$lane], [$Rn], $Rm",
4396                                 [],
4397                                 NoItinerary>,
4398         Sched<[WriteVecSt, ReadVecSt, ReadVecSt, ReadVecSt]>;
4399 }
4400
4401 multiclass ST_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
4402                            Operand uimm_b, Operand uimm_h,
4403                            Operand uimm_s, Operand uimm_d> {
4404   def _B_fixed : STN_WBFx_Lane<r, 0b00, op0,
4405                                !cast<RegisterOperand>(List # "B_operand"),
4406                                uimm_b, neon_uimm4_bare, asmop> {
4407     let Inst{12-10} = lane{2-0};
4408     let Inst{30} = lane{3};
4409   }
4410
4411   def _B_register : STN_WBReg_Lane<r, 0b00, op0,
4412                                    !cast<RegisterOperand>(List # "B_operand"),
4413                                    uimm_b, neon_uimm4_bare, asmop> {
4414     let Inst{12-10} = lane{2-0};
4415     let Inst{30} = lane{3};
4416   }
4417
4418   def _H_fixed : STN_WBFx_Lane<r, 0b01, op0,
4419                                !cast<RegisterOperand>(List # "H_operand"),
4420                                uimm_h, neon_uimm3_bare, asmop> {
4421     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4422     let Inst{30} = lane{2};
4423   }
4424
4425   def _H_register : STN_WBReg_Lane<r, 0b01, op0,
4426                                    !cast<RegisterOperand>(List # "H_operand"),
4427                                    uimm_h, neon_uimm3_bare, asmop> {
4428     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4429     let Inst{30} = lane{2};
4430   }
4431
4432   def _S_fixed : STN_WBFx_Lane<r, 0b10, op0,
4433                                !cast<RegisterOperand>(List # "S_operand"),
4434                                uimm_s, neon_uimm2_bare, asmop> {
4435     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4436     let Inst{30} = lane{1};
4437   }
4438
4439   def _S_register : STN_WBReg_Lane<r, 0b10, op0,
4440                                    !cast<RegisterOperand>(List # "S_operand"),
4441                                    uimm_s, neon_uimm2_bare, asmop> {
4442     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4443     let Inst{30} = lane{1};
4444   }
4445
4446   def _D_fixed : STN_WBFx_Lane<r, 0b10, op0,
4447                                !cast<RegisterOperand>(List # "D_operand"),
4448                                uimm_d, neon_uimm1_bare, asmop> {
4449     let Inst{12-10} = 0b001;
4450     let Inst{30} = lane{0};
4451   }
4452
4453   def _D_register : STN_WBReg_Lane<r, 0b10, op0,
4454                                    !cast<RegisterOperand>(List # "D_operand"),
4455                                    uimm_d, neon_uimm1_bare, asmop> {
4456     let Inst{12-10} = 0b001;
4457     let Inst{30} = lane{0};
4458   }
4459 }
4460
4461 // Post-index store single 1-element structure from one lane of 1 register.
4462 defm ST1LN_WB : ST_Lane_WB_BHSD<0b0, 0b0, "VOne", "st1", uimm_exact1,
4463                                 uimm_exact2, uimm_exact4, uimm_exact8>;
4464
4465 // Post-index store single N-element structure from one lane of N consecutive
4466 // registers (N = 2,3,4)
4467 defm ST2LN_WB : ST_Lane_WB_BHSD<0b1, 0b0, "VPair", "st2", uimm_exact2,
4468                                 uimm_exact4, uimm_exact8, uimm_exact16>;
4469 defm ST3LN_WB : ST_Lane_WB_BHSD<0b0, 0b1, "VTriple", "st3", uimm_exact3,
4470                                 uimm_exact6, uimm_exact12, uimm_exact24>;
4471 defm ST4LN_WB : ST_Lane_WB_BHSD<0b1, 0b1, "VQuad", "st4", uimm_exact4,
4472                                 uimm_exact8, uimm_exact16, uimm_exact32>;
4473
4474 // End of post-index load/store single N-element instructions
4475 // (class SIMD lsone-post)
4476
4477 // Neon Scalar instructions implementation
4478 // Scalar Three Same
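// These classes model the scalar (single-element) three-operand forms that
// read and write individual FP registers, e.g. (illustrative)
// "sqadd b0, b1, b2" or "fmulx s0, s1, s2".  The record suffix
// (bbb/hhh/sss/ddd) encodes the operand sizes.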
4479
4480 class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
4481                              RegisterClass FPRC>
4482   : NeonI_Scalar3Same<u, size, opcode,
4483                       (outs FPRC:$Rd), (ins FPRC:$Rn, FPRC:$Rm),
4484                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4485                       [],
4486                       NoItinerary>,
4487     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
4488
4489 class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
4490   : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
4491
4492 multiclass NeonI_Scalar3Same_HS_sizes<bit u, bits<5> opcode, string asmop,
4493                                       bit Commutable = 0> {
4494   let isCommutable = Commutable in {
4495     def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
4496     def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
4497   }
4498 }
4499
4500 multiclass NeonI_Scalar3Same_SD_sizes<bit u, bit size_high, bits<5> opcode,
4501                                       string asmop, bit Commutable = 0> {
4502   let isCommutable = Commutable in {
4503     def sss : NeonI_Scalar3Same_size<u, {size_high, 0b0}, opcode, asmop, FPR32>;
4504     def ddd : NeonI_Scalar3Same_size<u, {size_high, 0b1}, opcode, asmop, FPR64>;
4505   }
4506 }
4507
4508 multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
4509                                         string asmop, bit Commutable = 0> {
4510   let isCommutable = Commutable in {
4511     def bbb : NeonI_Scalar3Same_size<u, 0b00, opcode, asmop, FPR8>;
4512     def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
4513     def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
4514     def ddd : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
4515   }
4516 }
4517
4518 multiclass Neon_Scalar3Same_D_size_patterns<SDPatternOperator opnode,
4519                                             Instruction INSTD> {
4520   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
4521             (INSTD FPR64:$Rn, FPR64:$Rm)>;
4522 }
4523
4524 multiclass Neon_Scalar3Same_BHSD_size_patterns<SDPatternOperator opnode,
4525                                                Instruction INSTB,
4526                                                Instruction INSTH,
4527                                                Instruction INSTS,
4528                                                Instruction INSTD>
4529   : Neon_Scalar3Same_D_size_patterns<opnode, INSTD> {
4530   def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
4531            (INSTB FPR8:$Rn, FPR8:$Rm)>;
4532   def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4533            (INSTH FPR16:$Rn, FPR16:$Rm)>;
4534   def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4535            (INSTS FPR32:$Rn, FPR32:$Rm)>;
4536 }
4537
4538 multiclass Neon_Scalar3Same_HS_size_patterns<SDPatternOperator opnode,
4539                                              Instruction INSTH,
4540                                              Instruction INSTS> {
4541   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4542             (INSTH FPR16:$Rn, FPR16:$Rm)>;
4543   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4544             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4545 }
4546
4547 multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
4548                                              ValueType SResTy, ValueType STy,
4549                                              Instruction INSTS, ValueType DResTy,
4550                                              ValueType DTy, Instruction INSTD> {
4551   def : Pat<(SResTy (opnode (STy FPR32:$Rn), (STy FPR32:$Rm))),
4552             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4553   def : Pat<(DResTy (opnode (DTy FPR64:$Rn), (DTy FPR64:$Rm))),
4554             (INSTD FPR64:$Rn, FPR64:$Rm)>;
4555 }
4556
4557 class Neon_Scalar3Same_cmp_V1_D_size_patterns<CondCode CC,
4558                                               Instruction INSTD>
4559   : Pat<(v1i64 (Neon_cmp (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm), CC)),
4560         (INSTD FPR64:$Rn, FPR64:$Rm)>;
4561
4562 // Scalar Three Different
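// Scalar "three different" instructions widen: the destination element is
// twice the width of the sources, e.g. (illustrative) "sqdmull s0, h1, h2",
// or the accumulating "sqdmlal d0, s1, s2" modelled by the ml_HS variants.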
4563
4564 class NeonI_Scalar3Diff_size<bit u, bits<2> size, bits<4> opcode, string asmop,
4565                              RegisterClass FPRCD, RegisterClass FPRCS>
4566   : NeonI_Scalar3Diff<u, size, opcode,
4567                       (outs FPRCD:$Rd), (ins FPRCS:$Rn, FPRCS:$Rm),
4568                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4569                       [],
4570                       NoItinerary>,
4571     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
4572
4573 multiclass NeonI_Scalar3Diff_HS_size<bit u, bits<4> opcode, string asmop> {
4574   def shh : NeonI_Scalar3Diff_size<u, 0b01, opcode, asmop, FPR32, FPR16>;
4575   def dss : NeonI_Scalar3Diff_size<u, 0b10, opcode, asmop, FPR64, FPR32>;
4576 }
4577
4578 multiclass NeonI_Scalar3Diff_ml_HS_size<bit u, bits<4> opcode, string asmop> {
4579   let Constraints = "$Src = $Rd" in {
4580     def shh : NeonI_Scalar3Diff<u, 0b01, opcode,
4581                        (outs FPR32:$Rd), (ins FPR32:$Src, FPR16:$Rn, FPR16:$Rm),
4582                        !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4583                        [],
4584                        NoItinerary>,
4585               Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]>;
4586     def dss : NeonI_Scalar3Diff<u, 0b10, opcode,
4587                        (outs FPR64:$Rd), (ins FPR64:$Src, FPR32:$Rn, FPR32:$Rm),
4588                        !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4589                        [],
4590                        NoItinerary>,
4591               Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]>;
4592   }
4593 }
4594
4595 multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
4596                                              Instruction INSTH,
4597                                              Instruction INSTS> {
4598   def : Pat<(v1i32 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4599             (INSTH FPR16:$Rn, FPR16:$Rm)>;
4600   def : Pat<(v1i64 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4601             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4602 }
4603
4604 multiclass Neon_Scalar3Diff_ml_HS_size_patterns<SDPatternOperator opnode,
4605                                              Instruction INSTH,
4606                                              Instruction INSTS> {
4607   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4608             (INSTH FPR32:$Src, FPR16:$Rn, FPR16:$Rm)>;
4609   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4610             (INSTS FPR64:$Src, FPR32:$Rn, FPR32:$Rm)>;
4611 }
4612
4613 // Scalar Two Registers Miscellaneous
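// Two-register miscellaneous scalar instructions take a single source
// register, e.g. (illustrative) "abs d0, d1", the narrowing "sqxtn s0, d1",
// or the compare-against-zero forms such as "cmeq d0, d1, #0" handled by the
// cmpz classes below.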
4614
4615 class NeonI_Scalar2SameMisc_size<bit u, bits<2> size, bits<5> opcode, string asmop,
4616                              RegisterClass FPRCD, RegisterClass FPRCS>
4617   : NeonI_Scalar2SameMisc<u, size, opcode,
4618                           (outs FPRCD:$Rd), (ins FPRCS:$Rn),
4619                           !strconcat(asmop, "\t$Rd, $Rn"),
4620                           [],
4621                           NoItinerary>,
4622     Sched<[WriteFPALU, ReadFPALU]>;
4623
4624 multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
4625                                          string asmop> {
4626   def ss : NeonI_Scalar2SameMisc_size<u, {size_high, 0b0}, opcode, asmop, FPR32,
4627                                       FPR32>;
4628   def dd : NeonI_Scalar2SameMisc_size<u, {size_high, 0b1}, opcode, asmop, FPR64,
4629                                       FPR64>;
4630 }
4631
4632 multiclass NeonI_Scalar2SameMisc_D_size<bit u, bits<5> opcode, string asmop> {
4633   def dd : NeonI_Scalar2SameMisc_size<u, 0b11, opcode, asmop, FPR64, FPR64>;
4634 }
4635
4636 multiclass NeonI_Scalar2SameMisc_BHSD_size<bit u, bits<5> opcode, string asmop>
4637   : NeonI_Scalar2SameMisc_D_size<u, opcode, asmop> {
4638   def bb : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR8>;
4639   def hh : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR16>;
4640   def ss : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR32>;
4641 }
4642
4643 class NeonI_Scalar2SameMisc_fcvtxn_D_size<bit u, bits<5> opcode, string asmop>
4644   : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR32, FPR64>;
4645
4646 multiclass NeonI_Scalar2SameMisc_narrow_HSD_size<bit u, bits<5> opcode,
4647                                                  string asmop> {
4648   def bh : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR16>;
4649   def hs : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR32>;
4650   def sd : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR64>;
4651 }
4652
4653 class NeonI_Scalar2SameMisc_accum_size<bit u, bits<2> size, bits<5> opcode,
4654                                        string asmop, RegisterClass FPRC>
4655   : NeonI_Scalar2SameMisc<u, size, opcode,
4656                           (outs FPRC:$Rd), (ins FPRC:$Src, FPRC:$Rn),
4657                           !strconcat(asmop, "\t$Rd, $Rn"),
4658                           [],
4659                           NoItinerary>,
4660     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
4661
4662 multiclass NeonI_Scalar2SameMisc_accum_BHSD_size<bit u, bits<5> opcode,
4663                                                  string asmop> {
4664
4665   let Constraints = "$Src = $Rd" in {
4666     def bb : NeonI_Scalar2SameMisc_accum_size<u, 0b00, opcode, asmop, FPR8>;
4667     def hh : NeonI_Scalar2SameMisc_accum_size<u, 0b01, opcode, asmop, FPR16>;
4668     def ss : NeonI_Scalar2SameMisc_accum_size<u, 0b10, opcode, asmop, FPR32>;
4669     def dd : NeonI_Scalar2SameMisc_accum_size<u, 0b11, opcode, asmop, FPR64>;
4670   }
4671 }
4672
4673 class Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<SDPatternOperator opnode,
4674                                                   Instruction INSTD>
4675   : Pat<(f32 (opnode (f64 FPR64:$Rn))),
4676         (INSTD FPR64:$Rn)>;
4677
4678 multiclass Neon_Scalar2SameMisc_fcvt_SD_size_patterns<SDPatternOperator opnode,
4679                                                       Instruction INSTS,
4680                                                       Instruction INSTD> {
4681   def : Pat<(v1i32 (opnode (f32 FPR32:$Rn))),
4682             (INSTS FPR32:$Rn)>;
4683   def : Pat<(v1i64 (opnode (f64 FPR64:$Rn))),
4684             (INSTD FPR64:$Rn)>;
4685 }
4686
4687 class Neon_Scalar2SameMisc_vcvt_D_size_patterns<SDPatternOperator opnode,
4688                                                 Instruction INSTD>
4689   : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))),
4690             (INSTD FPR64:$Rn)>;
4691
4692 multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator opnode,
4693                                                      Instruction INSTS,
4694                                                      Instruction INSTD> {
4695   def : Pat<(f32 (opnode (v1i32 FPR32:$Rn))),
4696             (INSTS FPR32:$Rn)>;
4697   def : Pat<(f64 (opnode (v1i64 FPR64:$Rn))),
4698             (INSTD FPR64:$Rn)>;
4699 }
4700
4701 multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
4702                                                  Instruction INSTS,
4703                                                  Instruction INSTD> {
4704   def : Pat<(f32 (opnode (f32 FPR32:$Rn))),
4705             (INSTS FPR32:$Rn)>;
4706   def : Pat<(f64 (opnode (f64 FPR64:$Rn))),
4707             (INSTD FPR64:$Rn)>;
4708 }
4709
4710 class Neon_Scalar2SameMisc_V1_D_size_patterns<SDPatternOperator opnode,
4711                                               Instruction INSTD>
4712   : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
4713         (INSTD FPR64:$Rn)>;
4714
4715 class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
4716   : NeonI_Scalar2SameMisc<u, 0b11, opcode,
4717                           (outs FPR64:$Rd), (ins FPR64:$Rn, neon_uimm0:$Imm),
4718                           !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4719                           [],
4720                           NoItinerary>,
4721     Sched<[WriteFPALU, ReadFPALU]>;
4722
4723 multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
4724                                               string asmop> {
4725   def ssi : NeonI_Scalar2SameMisc<u, 0b10, opcode,
4726                            (outs FPR32:$Rd), (ins FPR32:$Rn, fpzz32:$FPImm),
4727                            !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
4728                            [],
4729                            NoItinerary>,
4730             Sched<[WriteFPALU, ReadFPALU]>;
4731   def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
4732                            (outs FPR64:$Rd), (ins FPR64:$Rn, fpzz32:$FPImm),
4733                            !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
4734                            [],
4735                            NoItinerary>,
4736             Sched<[WriteFPALU, ReadFPALU]>;
4737 }
4738
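// Select the immediate-zero form when the second compare operand is an
// all-zero vector.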
4739 class Neon_Scalar2SameMisc_cmpz_D_size_patterns<SDPatternOperator opnode,
4740                                                 Instruction INSTD>
4741   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
4742                        (v1i64 (bitconvert (v8i8 Neon_AllZero))))),
4743         (INSTD FPR64:$Rn, 0)>;
4744
4745 class Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<CondCode CC,
4746                                                    Instruction INSTD>
4747   : Pat<(v1i64 (Neon_cmpz (v1i64 FPR64:$Rn),
4748                           (i32 neon_uimm0:$Imm), CC)),
4749         (INSTD FPR64:$Rn, neon_uimm0:$Imm)>;
4750
4751 multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
4752                                                       CondCode CC,
4753                                                       Instruction INSTS,
4754                                                       Instruction INSTD> {
4755   def : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (f32 fpzz32:$FPImm))),
4756             (INSTS FPR32:$Rn, fpzz32:$FPImm)>;
4757   def : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (f32 fpzz32:$FPImm))),
4758             (INSTD FPR64:$Rn, fpzz32:$FPImm)>;
4759   def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpzz32:$FPImm), CC)),
4760             (INSTD FPR64:$Rn, fpzz32:$FPImm)>;
4761 }
4762
4763 multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
4764                                                 Instruction INSTD> {
4765   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn))),
4766             (INSTD FPR64:$Rn)>;
4767 }
4768
4769 multiclass Neon_Scalar2SameMisc_BHSD_size_patterns<SDPatternOperator opnode,
4770                                                    Instruction INSTB,
4771                                                    Instruction INSTH,
4772                                                    Instruction INSTS,
4773                                                    Instruction INSTD>
4774   : Neon_Scalar2SameMisc_D_size_patterns<opnode, INSTD> {
4775   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn))),
4776             (INSTB FPR8:$Rn)>;
4777   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn))),
4778             (INSTH FPR16:$Rn)>;
4779   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn))),
4780             (INSTS FPR32:$Rn)>;
4781 }
4782
4783 multiclass Neon_Scalar2SameMisc_narrow_HSD_size_patterns<
4784                                                        SDPatternOperator opnode,
4785                                                        Instruction INSTH,
4786                                                        Instruction INSTS,
4787                                                        Instruction INSTD> {
4788   def : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn))),
4789             (INSTH FPR16:$Rn)>;
4790   def : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn))),
4791             (INSTS FPR32:$Rn)>;
4792   def : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn))),
4793             (INSTD FPR64:$Rn)>;
4794
4795 }
4796
4797 multiclass Neon_Scalar2SameMisc_accum_BHSD_size_patterns<
4798                                                        SDPatternOperator opnode,
4799                                                        Instruction INSTB,
4800                                                        Instruction INSTH,
4801                                                        Instruction INSTS,
4802                                                        Instruction INSTD> {
4803   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Src), (v1i8 FPR8:$Rn))),
4804             (INSTB FPR8:$Src, FPR8:$Rn)>;
4805   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Src), (v1i16 FPR16:$Rn))),
4806             (INSTH FPR16:$Src, FPR16:$Rn)>;
4807   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i32 FPR32:$Rn))),
4808             (INSTS FPR32:$Src, FPR32:$Rn)>;
4809   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn))),
4810             (INSTD FPR64:$Src, FPR64:$Rn)>;
4811 }
4812
4813 // Scalar Shift By Immediate
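// The element size is encoded by the leading one of the immh field (0001 for
// 8-bit elements up to 1xxx for 64-bit); the remaining immh:immb bits are
// filled from the $Imm operand in the per-size instantiations below.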
4814
4815 class NeonI_ScalarShiftImm_size<bit u, bits<5> opcode, string asmop,
4816                                 RegisterClass FPRC, Operand ImmTy>
4817   : NeonI_ScalarShiftImm<u, opcode,
4818                          (outs FPRC:$Rd), (ins FPRC:$Rn, ImmTy:$Imm),
4819                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4820                          [], NoItinerary>,
4821     Sched<[WriteFPALU, ReadFPALU]>;
4822
4823 multiclass NeonI_ScalarShiftRightImm_D_size<bit u, bits<5> opcode,
4824                                             string asmop> {
4825   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
4826     bits<6> Imm;
4827     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4828     let Inst{21-16} = Imm;
4829   }
4830 }
4831
4832 multiclass NeonI_ScalarShiftRightImm_BHSD_size<bit u, bits<5> opcode,
4833                                                string asmop>
4834   : NeonI_ScalarShiftRightImm_D_size<u, opcode, asmop> {
4835   def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shr_imm8> {
4836     bits<3> Imm;
4837     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4838     let Inst{18-16} = Imm;
4839   }
4840   def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shr_imm16> {
4841     bits<4> Imm;
4842     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4843     let Inst{19-16} = Imm;
4844   }
4845   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
4846     bits<5> Imm;
4847     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4848     let Inst{20-16} = Imm;
4849   }
4850 }
4851
4852 multiclass NeonI_ScalarShiftLeftImm_D_size<bit u, bits<5> opcode,
4853                                             string asmop> {
4854   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shl_imm64> {
4855     bits<6> Imm;
4856     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4857     let Inst{21-16} = Imm;
4858   }
4859 }
4860
4861 multiclass NeonI_ScalarShiftLeftImm_BHSD_size<bit u, bits<5> opcode,
4862                                               string asmop>
4863   : NeonI_ScalarShiftLeftImm_D_size<u, opcode, asmop> {
4864   def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shl_imm8> {
4865     bits<3> Imm;
4866     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4867     let Inst{18-16} = Imm;
4868   }
4869   def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shl_imm16> {
4870     bits<4> Imm;
4871     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4872     let Inst{19-16} = Imm;
4873   }
4874   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shl_imm32> {
4875     bits<5> Imm;
4876     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4877     let Inst{20-16} = Imm;
4878   }
4879 }
4880
4881 class NeonI_ScalarShiftRightImm_accum_D_size<bit u, bits<5> opcode, string asmop>
4882   : NeonI_ScalarShiftImm<u, opcode,
4883                          (outs FPR64:$Rd),
4884                          (ins FPR64:$Src, FPR64:$Rn, shr_imm64:$Imm),
4885                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4886                          [], NoItinerary>,
4887     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
4888     bits<6> Imm;
4889     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4890     let Inst{21-16} = Imm;
4891     let Constraints = "$Src = $Rd";
4892 }
4893
4894 class NeonI_ScalarShiftLeftImm_accum_D_size<bit u, bits<5> opcode, string asmop>
4895   : NeonI_ScalarShiftImm<u, opcode,
4896                          (outs FPR64:$Rd),
4897                          (ins FPR64:$Src, FPR64:$Rn, shl_imm64:$Imm),
4898                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4899                          [], NoItinerary>,
4900     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
4901     bits<6> Imm;
4902     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4903     let Inst{21-16} = Imm;
4904     let Constraints = "$Src = $Rd";
4905 }
4906
4907 class NeonI_ScalarShiftImm_narrow_size<bit u, bits<5> opcode, string asmop,
4908                                        RegisterClass FPRCD, RegisterClass FPRCS,
4909                                        Operand ImmTy>
4910   : NeonI_ScalarShiftImm<u, opcode,
4911                          (outs FPRCD:$Rd), (ins FPRCS:$Rn, ImmTy:$Imm),
4912                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4913                          [], NoItinerary>,
4914     Sched<[WriteFPALU, ReadFPALU]>;
4915
4916 multiclass NeonI_ScalarShiftImm_narrow_HSD_size<bit u, bits<5> opcode,
4917                                                 string asmop> {
4918   def bhi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR8, FPR16,
4919                                              shr_imm8> {
4920     bits<3> Imm;
4921     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4922     let Inst{18-16} = Imm;
4923   }
4924   def hsi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR16, FPR32,
4925                                              shr_imm16> {
4926     bits<4> Imm;
4927     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4928     let Inst{19-16} = Imm;
4929   }
4930   def sdi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR32, FPR64,
4931                                              shr_imm32> {
4932     bits<5> Imm;
4933     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4934     let Inst{20-16} = Imm;
4935   }
4936 }
4937
4938 multiclass NeonI_ScalarShiftImm_cvt_SD_size<bit u, bits<5> opcode, string asmop> {
4939   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
4940     bits<5> Imm;
4941     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4942     let Inst{20-16} = Imm;
4943   }
4944   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
4945     bits<6> Imm;
4946     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4947     let Inst{21-16} = Imm;
4948   }
4949 }
4950
4951 multiclass Neon_ScalarShiftRImm_D_size_patterns<SDPatternOperator opnode,
4952                                                Instruction INSTD> {
4953   def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
4954                 (INSTD FPR64:$Rn, imm:$Imm)>;
4955 }
4956
4957 multiclass Neon_ScalarShiftLImm_D_size_patterns<SDPatternOperator opnode,
4958                                                Instruction INSTD> {
4959   def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shl_imm64:$Imm))),
4960                 (INSTD FPR64:$Rn, imm:$Imm)>;
4961 }
4962
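// These match a generic 64-bit scalar shift whose shift amount is a constant
// splatted through Neon_vdup rather than passed as a plain i32 immediate.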
4963 class Neon_ScalarShiftLImm_V1_D_size_patterns<SDPatternOperator opnode,
4964                                              Instruction INSTD>
4965   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
4966             (v1i64 (Neon_vdup (i32 shl_imm64:$Imm))))),
4967         (INSTD FPR64:$Rn, imm:$Imm)>;
4968
4969 class Neon_ScalarShiftRImm_V1_D_size_patterns<SDPatternOperator opnode,
4970                                              Instruction INSTD>
4971   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
4972             (v1i64 (Neon_vdup (i32 shr_imm64:$Imm))))),
4973         (INSTD FPR64:$Rn, imm:$Imm)>;
4974
4975 multiclass Neon_ScalarShiftLImm_BHSD_size_patterns<SDPatternOperator opnode,
4976                                                    Instruction INSTB,
4977                                                    Instruction INSTH,
4978                                                    Instruction INSTS,
4979                                                    Instruction INSTD>
4980   : Neon_ScalarShiftLImm_D_size_patterns<opnode, INSTD> {
4981   def bbi : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (i32 shl_imm8:$Imm))),
4982                 (INSTB FPR8:$Rn, imm:$Imm)>;
4983   def hhi : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (i32 shl_imm16:$Imm))),
4984                 (INSTH FPR16:$Rn, imm:$Imm)>;
4985   def ssi : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (i32 shl_imm32:$Imm))),
4986                 (INSTS FPR32:$Rn, imm:$Imm)>;
4987 }
4988
4989 class Neon_ScalarShiftLImm_accum_D_size_patterns<SDPatternOperator opnode,
4990                                                 Instruction INSTD>
4991   : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
4992             (i32 shl_imm64:$Imm))),
4993         (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
4994
4995 class Neon_ScalarShiftRImm_accum_D_size_patterns<SDPatternOperator opnode,
4996                                                 Instruction INSTD>
4997   : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
4998             (i32 shr_imm64:$Imm))),
4999         (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
5000
5001 multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
5002                                                        SDPatternOperator opnode,
5003                                                        Instruction INSTH,
5004                                                        Instruction INSTS,
5005                                                        Instruction INSTD> {
5006   def bhi : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn), (i32 shr_imm16:$Imm))),
5007                 (INSTH FPR16:$Rn, imm:$Imm)>;
5008   def hsi : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
5009                 (INSTS FPR32:$Rn, imm:$Imm)>;
5010   def sdi : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5011                 (INSTD FPR64:$Rn, imm:$Imm)>;
5012 }
5013
5014 multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator opnode,
5015                                                       Instruction INSTS,
5016                                                       Instruction INSTD> {
5017   def ssi : Pat<(f32 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
5018                 (INSTS FPR32:$Rn, imm:$Imm)>;
5019   def ddi : Pat<(f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5020                 (INSTD FPR64:$Rn, imm:$Imm)>;
5021 }
5022
5023 multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator opnode,
5024                                                       Instruction INSTS,
5025                                                       Instruction INSTD> {
5026   def ssi : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
5027                 (INSTS FPR32:$Rn, imm:$Imm)>;
5028   def ddi : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5029                 (INSTD FPR64:$Rn, imm:$Imm)>;
5030 }
5031
5032 // Scalar Signed Shift Right (Immediate)
5033 defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
5034 defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
5035 // Pattern to match llvm.arm.* intrinsic.
5036 def : Neon_ScalarShiftRImm_V1_D_size_patterns<sra, SSHRddi>;
5037
5038 // Scalar Unsigned Shift Right (Immediate)
5039 defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
5040 defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrdu_n, USHRddi>;
5041 // Pattern to match llvm.arm.* intrinsic.
5042 def : Neon_ScalarShiftRImm_V1_D_size_patterns<srl, USHRddi>;
5043
5044 // Scalar Signed Rounding Shift Right (Immediate)
5045 defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
5046 defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vsrshr, SRSHRddi>;
5047
5048 // Scalar Unsigned Rounding Shift Right (Immediate)
5049 defm URSHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00100, "urshr">;
5050 defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vurshr, URSHRddi>;
5051
5052 // Scalar Signed Shift Right and Accumulate (Immediate)
5053 def SSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00010, "ssra">;
5054 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5055           <int_aarch64_neon_vsrads_n, SSRA>;
5056
5057 // Scalar Unsigned Shift Right and Accumulate (Immediate)
5058 def USRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00010, "usra">;
5059 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5060           <int_aarch64_neon_vsradu_n, USRA>;
5061
5062 // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
5063 def SRSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00110, "srsra">;
5064 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5065           <int_aarch64_neon_vrsrads_n, SRSRA>;
5066
5067 // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
5068 def URSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00110, "ursra">;
5069 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5070           <int_aarch64_neon_vrsradu_n, URSRA>;
5071
5072 // Scalar Shift Left (Immediate)
5073 defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
5074 defm : Neon_ScalarShiftLImm_D_size_patterns<int_aarch64_neon_vshld_n, SHLddi>;
5075 // Pattern to match llvm.arm.* intrinsic.
5076 def : Neon_ScalarShiftLImm_V1_D_size_patterns<shl, SHLddi>;
5077
5078 // Signed Saturating Shift Left (Immediate)
5079 defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
5080 defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshls_n,
5081                                                SQSHLbbi, SQSHLhhi,
5082                                                SQSHLssi, SQSHLddi>;
5083 // Pattern to match llvm.arm.* intrinsic.
5084 defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_sqrshlImm, SQSHLddi>;
5085
5086 // Unsigned Saturating Shift Left (Immediate)
5087 defm UQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01110, "uqshl">;
5088 defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshlu_n,
5089                                                UQSHLbbi, UQSHLhhi,
5090                                                UQSHLssi, UQSHLddi>;
5091 // Pattern to match llvm.arm.* intrinsic.
5092 defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_uqrshlImm, UQSHLddi>;
5093
5094 // Signed Saturating Shift Left Unsigned (Immediate)
5095 defm SQSHLU : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01100, "sqshlu">;
5096 defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vsqshlu,
5097                                                SQSHLUbbi, SQSHLUhhi,
5098                                                SQSHLUssi, SQSHLUddi>;
5099
5100 // Shift Right And Insert (Immediate)
5101 def SRI : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b01000, "sri">;
5102 def : Neon_ScalarShiftRImm_accum_D_size_patterns
5103           <int_aarch64_neon_vsri, SRI>;
5104
5105 // Shift Left And Insert (Immediate)
5106 def SLI : NeonI_ScalarShiftLeftImm_accum_D_size<0b1, 0b01010, "sli">;
5107 def : Neon_ScalarShiftLImm_accum_D_size_patterns
5108           <int_aarch64_neon_vsli, SLI>;
5109
5110 // Signed Saturating Shift Right Narrow (Immediate)
5111 defm SQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10010, "sqshrn">;
5112 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrn,
5113                                                     SQSHRNbhi, SQSHRNhsi,
5114                                                     SQSHRNsdi>;
5115
5116 // Unsigned Saturating Shift Right Narrow (Immediate)
5117 defm UQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10010, "uqshrn">;
5118 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqshrn,
5119                                                     UQSHRNbhi, UQSHRNhsi,
5120                                                     UQSHRNsdi>;
5121
5122 // Signed Saturating Rounded Shift Right Narrow (Immediate)
5123 defm SQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10011, "sqrshrn">;
5124 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrn,
5125                                                     SQRSHRNbhi, SQRSHRNhsi,
5126                                                     SQRSHRNsdi>;
5127
5128 // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
5129 defm UQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10011, "uqrshrn">;
5130 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqrshrn,
5131                                                     UQRSHRNbhi, UQRSHRNhsi,
5132                                                     UQRSHRNsdi>;
5133
5134 // Signed Saturating Shift Right Unsigned Narrow (Immediate)
5135 defm SQSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10000, "sqshrun">;
5136 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrun,
5137                                                     SQSHRUNbhi, SQSHRUNhsi,
5138                                                     SQSHRUNsdi>;
5139
5140 // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
5141 defm SQRSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10001, "sqrshrun">;
5142 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
5143                                                     SQRSHRUNbhi, SQRSHRUNhsi,
5144                                                     SQRSHRUNsdi>;
5145
5146 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
5147 defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
5148 defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxs2fp_n,
5149                                                   SCVTF_Nssi, SCVTF_Nddi>;
5150
5151 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
5152 defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
5153 defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxu2fp_n,
5154                                                   UCVTF_Nssi, UCVTF_Nddi>;
5155
5156 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
5157 defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
5158 defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxs_n,
5159                                                   FCVTZS_Nssi, FCVTZS_Nddi>;
5160
5161 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
5162 defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
5163 defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxu_n,
5164                                                   FCVTZU_Nssi, FCVTZU_Nddi>;
5165
5166 // Patterns For Convert Instructions Between v1f64 and v1i64
5167 class Neon_ScalarShiftImm_cvtf_v1f64_pattern<SDPatternOperator opnode,
5168                                              Instruction INST>
5169     : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5170           (INST FPR64:$Rn, imm:$Imm)>;
5171
5172 class Neon_ScalarShiftImm_fcvt_v1f64_pattern<SDPatternOperator opnode,
5173                                              Instruction INST>
5174     : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
5175           (INST FPR64:$Rn, imm:$Imm)>;
5176
5177 def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxs2fp,
5178                                              SCVTF_Nddi>;
5179
5180 def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxu2fp,
5181                                              UCVTF_Nddi>;
5182
5183 def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxs,
5184                                              FCVTZS_Nddi>;
5185
5186 def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxu,
5187                                              FCVTZU_Nddi>;
5188
5189 // Scalar Integer Add
5190 let isCommutable = 1 in {
5191 def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
5192 }
5193
5194 // Scalar Integer Sub
5195 def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
5196
5197 // Patterns for Scalar Integer Add and Sub with D register only
5198 defm : Neon_Scalar3Same_D_size_patterns<add, ADDddd>;
5199 defm : Neon_Scalar3Same_D_size_patterns<sub, SUBddd>;
5200
5201 // Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
5202 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
5203 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
5204 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
5205 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
5206
5207 // Scalar Integer Saturating Add (Signed, Unsigned)
5208 defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
5209 defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
5210
5211 // Scalar Integer Saturating Sub (Signed, Unsigned)
5212 defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
5213 defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
5214
5215
5216 // Patterns to match llvm.arm.* intrinsic for
5217 // Scalar Integer Saturating Add, Sub (Signed, Unsigned)
5218 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqadds, SQADDbbb,
5219                                            SQADDhhh, SQADDsss, SQADDddd>;
5220 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqaddu, UQADDbbb,
5221                                            UQADDhhh, UQADDsss, UQADDddd>;
5222 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubs, SQSUBbbb,
5223                                            SQSUBhhh, SQSUBsss, SQSUBddd>;
5224 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubu, UQSUBbbb,
5225                                            UQSUBhhh, UQSUBsss, UQSUBddd>;
5226
5227 // Scalar Integer Saturating Doubling Multiply Half High
5228 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in
5229 defm SQDMULH : NeonI_Scalar3Same_HS_sizes<0b0, 0b10110, "sqdmulh", 1>;
5230
5231 // Scalar Integer Saturating Rounding Doubling Multiply Half High
5232 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
5233 defm SQRDMULH : NeonI_Scalar3Same_HS_sizes<0b1, 0b10110, "sqrdmulh", 1>;
5234 }
5235
5236 // Patterns to match llvm.arm.* intrinsic for
5237 // Scalar Integer Saturating Doubling Multiply Half High and
5238 // Scalar Integer Saturating Rounding Doubling Multiply Half High
5239 defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqdmulh, SQDMULHhhh,
5240                                                                SQDMULHsss>;
5241 defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqrdmulh, SQRDMULHhhh,
5242                                                                 SQRDMULHsss>;
5243
5244 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in {
5245 // Scalar Floating-point Multiply Extended
5246 defm FMULX : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11011, "fmulx", 1>;
5247 }
5248
5249 // Scalar Floating-point Reciprocal Step
5250 defm FRECPS : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11111, "frecps", 0>;
5251 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vrecps, f32, f32,
5252                                          FRECPSsss, f64, f64, FRECPSddd>;
5253 def : Pat<(v1f64 (int_arm_neon_vrecps (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5254           (FRECPSddd FPR64:$Rn, FPR64:$Rm)>;
5255
5256 // Scalar Floating-point Reciprocal Square Root Step
5257 defm FRSQRTS : NeonI_Scalar3Same_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", 0>;
5258 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vrsqrts, f32, f32,
5259                                          FRSQRTSsss, f64, f64, FRSQRTSddd>;
5260 def : Pat<(v1f64 (int_arm_neon_vrsqrts (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5261           (FRSQRTSddd FPR64:$Rn, FPR64:$Rm)>;
5262 def : Pat<(v1f64 (fsqrt (v1f64 FPR64:$Rn))), (FSQRTdd FPR64:$Rn)>;
5263
5264 // Patterns to match llvm.aarch64.* intrinsic for
5265 // Scalar Floating-point Multiply Extended
5266 multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
5267                                                   Instruction INSTS,
5268                                                   Instruction INSTD> {
5269   def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
5270             (INSTS FPR32:$Rn, FPR32:$Rm)>;
5271   def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
5272             (INSTD FPR64:$Rn, FPR64:$Rm)>;
5273 }
5274
5275 defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
5276                                               FMULXsss, FMULXddd>;
5277 def : Pat<(v1f64 (int_aarch64_neon_vmulx (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5278           (FMULXddd FPR64:$Rn, FPR64:$Rm)>;
5279
5280 // Scalar Integer Shift Left (Signed, Unsigned)
5281 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
5282 def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
5283
5284 // Patterns to match llvm.arm.* intrinsic for
5285 // Scalar Integer Shift Left (Signed, Unsigned)
5286 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
5287 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
5288
5289 // Patterns to match llvm.aarch64.* intrinsic for
5290 // Scalar Integer Shift Left (Signed, Unsigned)
5291 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
5292 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
5293
5294 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
5295 defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
5296 defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
5297
5298 // Patterns to match llvm.aarch64.* intrinsic for
5299 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
5300 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb,
5301                                            SQSHLhhh, SQSHLsss, SQSHLddd>;
5302 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb,
5303                                            UQSHLhhh, UQSHLsss, UQSHLddd>;
5304
5305 // Patterns to match llvm.arm.* intrinsic for
5306 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
5307 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
5308 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
5309
5310 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
5311 def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
5312 def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
5313
5314 // Patterns to match llvm.aarch64.* intrinsic for
5315 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
5316 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
5317 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
5318
5319 // Patterns to match llvm.arm.* intrinsic for
5320 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
5321 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
5322 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
5323
5324 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
5325 defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
5326 defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
5327
5328 // Patterns to match llvm.aarch64.* intrinsic for
5329 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
5330 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb,
5331                                            SQRSHLhhh, SQRSHLsss, SQRSHLddd>;
5332 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb,
5333                                            UQRSHLhhh, UQRSHLsss, UQRSHLddd>;
5334
5335 // Patterns to match llvm.arm.* intrinsic for
5336 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
5337 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
5338 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
5339
5340 let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC] in {
5341 // Signed Saturating Doubling Multiply-Add Long
5342 defm SQDMLAL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1001, "sqdmlal">;
5343 }
5344 defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlal,
5345                                             SQDMLALshh, SQDMLALdss>;
5346
5347 // Signed Saturating Doubling Multiply-Subtract Long
5348 let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC] in {
5349 defm SQDMLSL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1011, "sqdmlsl">;
5350 }
5351 defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlsl,
5352                                             SQDMLSLshh, SQDMLSLdss>;
5353
5354 // Signed Saturating Doubling Multiply Long
5355 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in {
5356 defm SQDMULL : NeonI_Scalar3Diff_HS_size<0b0, 0b1101, "sqdmull">;
5357 }
5358 defm : Neon_Scalar3Diff_HS_size_patterns<int_arm_neon_vqdmull,
5359                                          SQDMULLshh, SQDMULLdss>;
5360
5361 // Scalar Signed Integer Convert To Floating-point
5362 defm SCVTF  : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11101, "scvtf">;
5363 defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtint2fps,
5364                                                  SCVTFss, SCVTFdd>;
5365
5366 // Scalar Unsigned Integer Convert To Floating-point
5367 defm UCVTF  : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11101, "ucvtf">;
5368 defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtint2fpu,
5369                                                  UCVTFss, UCVTFdd>;
5370
5371 // Scalar Floating-point Converts
5372 def FCVTXN : NeonI_Scalar2SameMisc_fcvtxn_D_size<0b1, 0b10110, "fcvtxn">;
5373 def : Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<int_aarch64_neon_fcvtxn,
5374                                                   FCVTXN>;
5375
5376 defm FCVTNS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11010, "fcvtns">;
5377 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtns,
5378                                                   FCVTNSss, FCVTNSdd>;
5379 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtns, FCVTNSdd>;
5380
5381 defm FCVTNU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11010, "fcvtnu">;
5382 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtnu,
5383                                                   FCVTNUss, FCVTNUdd>;
5384 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtnu, FCVTNUdd>;
5385
5386 defm FCVTMS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11011, "fcvtms">;
5387 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtms,
5388                                                   FCVTMSss, FCVTMSdd>;
5389 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtms, FCVTMSdd>;
5390
5391 defm FCVTMU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11011, "fcvtmu">;
5392 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtmu,
5393                                                   FCVTMUss, FCVTMUdd>;
5394 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtmu, FCVTMUdd>;
5395
5396 defm FCVTAS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11100, "fcvtas">;
5397 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtas,
5398                                                   FCVTASss, FCVTASdd>;
5399 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtas, FCVTASdd>;
5400
5401 defm FCVTAU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11100, "fcvtau">;
5402 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtau,
5403                                                   FCVTAUss, FCVTAUdd>;
5404 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtau, FCVTAUdd>;
5405
5406 defm FCVTPS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11010, "fcvtps">;
5407 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtps,
5408                                                   FCVTPSss, FCVTPSdd>;
5409 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtps, FCVTPSdd>;
5410
5411 defm FCVTPU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11010, "fcvtpu">;
5412 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtpu,
5413                                                   FCVTPUss, FCVTPUdd>;
5414 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtpu, FCVTPUdd>;
5415
5416 defm FCVTZS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11011, "fcvtzs">;
5417 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzs,
5418                                                   FCVTZSss, FCVTZSdd>;
5419 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_aarch64_neon_vcvtzs,
5420                                                 FCVTZSdd>;
5421
5422 defm FCVTZU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11011, "fcvtzu">;
5423 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzu,
5424                                                   FCVTZUss, FCVTZUdd>;
5425 def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_aarch64_neon_vcvtzu,
5426                                                 FCVTZUdd>;
5427
5428 // Patterns For Convert Instructions Between v1f64 and v1i64
5429 class Neon_Scalar2SameMisc_cvtf_v1f64_pattern<SDPatternOperator opnode,
5430                                               Instruction INST>
5431     : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn))), (INST FPR64:$Rn)>;
5432
5433 class Neon_Scalar2SameMisc_fcvt_v1f64_pattern<SDPatternOperator opnode,
5434                                               Instruction INST>
5435     : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
5436
5437 def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<sint_to_fp, SCVTFdd>;
5438 def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<uint_to_fp, UCVTFdd>;
5439
5440 def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_sint, FCVTZSdd>;
5441 def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_uint, FCVTZUdd>;
5442
5443 // Scalar Floating-point Reciprocal Estimate
5444 defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
5445 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpe,
5446                                              FRECPEss, FRECPEdd>;
5447 def : Neon_Scalar2SameMisc_V1_D_size_patterns<int_arm_neon_vrecpe,
5448                                               FRECPEdd>;
5449
5450 // Scalar Floating-point Reciprocal Exponent
5451 defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
5452 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpx,
5453                                              FRECPXss, FRECPXdd>;
5454
5455 // Scalar Floating-point Reciprocal Square Root Estimate
5456 defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
5457 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrsqrte,
5458                                                  FRSQRTEss, FRSQRTEdd>;
5459 def : Neon_Scalar2SameMisc_V1_D_size_patterns<int_arm_neon_vrsqrte,
5460                                               FRSQRTEdd>;
5461
5462 // Scalar Floating-point Round
5463 class Neon_ScalarFloatRound_pattern<SDPatternOperator opnode, Instruction INST>
5464     : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
5465
5466 def : Neon_ScalarFloatRound_pattern<fceil, FRINTPdd>;
5467 def : Neon_ScalarFloatRound_pattern<ffloor, FRINTMdd>;
5468 def : Neon_ScalarFloatRound_pattern<ftrunc, FRINTZdd>;
5469 def : Neon_ScalarFloatRound_pattern<frint, FRINTXdd>;
5470 def : Neon_ScalarFloatRound_pattern<fnearbyint, FRINTIdd>;
5471 def : Neon_ScalarFloatRound_pattern<frnd, FRINTAdd>;
5472 def : Neon_ScalarFloatRound_pattern<int_aarch64_neon_frintn, FRINTNdd>;
5473
5474 // Scalar Integer Compare
5475
5476 // Scalar Compare Bitwise Equal
5477 def CMEQddd: NeonI_Scalar3Same_D_size<0b1, 0b10001, "cmeq">;
5478 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vceq, CMEQddd>;
5479
5480 class Neon_Scalar3Same_cmp_D_size_v1_patterns<SDPatternOperator opnode,
5481                                               Instruction INSTD,
5482                                               CondCode CC>
5483   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm), CC)),
5484         (INSTD FPR64:$Rn, FPR64:$Rm)>;
5485
5486 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMEQddd, SETEQ>;
5487
5488 // Scalar Compare Signed Greater Than Or Equal
5489 def CMGEddd: NeonI_Scalar3Same_D_size<0b0, 0b00111, "cmge">;
5490 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vcge, CMGEddd>;
5491 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGEddd, SETGE>;
5492
5493 // Scalar Compare Unsigned Higher Or Same
5494 def CMHSddd: NeonI_Scalar3Same_D_size<0b1, 0b00111, "cmhs">;
5495 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vchs, CMHSddd>;
5496 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHSddd, SETUGE>;
5497
5498 // Scalar Compare Unsigned Higher
5499 def CMHIddd: NeonI_Scalar3Same_D_size<0b1, 0b00110, "cmhi">;
5500 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vchi, CMHIddd>;
5501 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHIddd, SETUGT>;
5502
5503 // Scalar Compare Signed Greater Than
5504 def CMGTddd: NeonI_Scalar3Same_D_size<0b0, 0b00110, "cmgt">;
5505 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vcgt, CMGTddd>;
5506 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGTddd, SETGT>;
5507
5508 // Scalar Compare Bitwise Test Bits
5509 def CMTSTddd: NeonI_Scalar3Same_D_size<0b0, 0b10001, "cmtst">;
5510 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vtstd, CMTSTddd>;
5511 defm : Neon_Scalar3Same_D_size_patterns<Neon_tst, CMTSTddd>;
5512
5513 // Scalar Compare Bitwise Equal To Zero
5514 def CMEQddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01001, "cmeq">;
5515 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vceq,
5516                                                 CMEQddi>;
5517 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETEQ, CMEQddi>;
5518
5519 // Scalar Compare Signed Greater Than Or Equal To Zero
5520 def CMGEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01000, "cmge">;
5521 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcge,
5522                                                 CMGEddi>;
5523 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGE, CMGEddi>;
5524
5525 // Scalar Compare Signed Greater Than Zero
5526 def CMGTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01000, "cmgt">;
5527 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcgt,
5528                                                 CMGTddi>;
5529 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGT, CMGTddi>;
5530
5531 // Scalar Compare Signed Less Than Or Equal To Zero
5532 def CMLEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01001, "cmle">;
5533 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vclez,
5534                                                 CMLEddi>;
5535 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLE, CMLEddi>;
5536
5537 // Scalar Compare Signed Less Than Zero
5538 def CMLTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01010, "cmlt">;
5539 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcltz,
5540                                                 CMLTddi>;
5541 def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLT, CMLTddi>;
5542
5543 // Scalar Floating-point Compare
5544
5545 // Scalar Floating-point Compare Mask Equal
5546 defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">;
5547 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fceq, v1i32, f32,
5548                                          FCMEQsss, v1i64, f64, FCMEQddd>;
5549 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETEQ, FCMEQddd>;
5550
5551 // Scalar Floating-point Compare Mask Equal To Zero
5552 defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
5553 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fceq, SETEQ,
5554                                                   FCMEQZssi, FCMEQZddi>;
5555
5556 // Scalar Floating-point Compare Mask Greater Than Or Equal
5557 defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
5558 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcge, v1i32, f32,
5559                                          FCMGEsss, v1i64, f64, FCMGEddd>;
5560 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGE, FCMGEddd>;
5561
5562 // Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
5563 defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">;
5564 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcge, SETGE,
5565                                                   FCMGEZssi, FCMGEZddi>;
5566
5567 // Scalar Floating-point Compare Mask Greater Than
5568 defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">;
5569 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcgt, v1i32, f32,
5570                                          FCMGTsss, v1i64, f64, FCMGTddd>;
5571 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGT, FCMGTddd>;
5572
5573 // Scalar Floating-point Compare Mask Greater Than Zero
5574 defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">;
5575 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcgt, SETGT,
5576                                                   FCMGTZssi, FCMGTZddi>;
5577
5578 // Scalar Floating-point Compare Mask Less Than Or Equal To Zero
5579 defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">;
5580 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fclez, SETLE,
5581                                                   FCMLEZssi, FCMLEZddi>;
5582
5583 // Scalar Floating-point Compare Mask Less Than Zero
5584 defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">;
5585 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcltz, SETLT,
5586                                                   FCMLTZssi, FCMLTZddi>;
5587
5588 // Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
5589 defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">;
5590 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcage, v1i32, f32,
5591                                          FACGEsss, v1i64, f64, FACGEddd>;
5592 def : Pat<(v1i64 (int_arm_neon_vacge (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5593           (FACGEddd FPR64:$Rn, FPR64:$Rm)>;
5594
5595 // Scalar Floating-point Absolute Compare Mask Greater Than
5596 defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">;
5597 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcagt, v1i32, f32,
5598                                          FACGTsss, v1i64, f64, FACGTddd>;
5599 def : Pat<(v1i64 (int_arm_neon_vacgt (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
5600           (FACGTddd FPR64:$Rn, FPR64:$Rm)>;
5601
5602 // Scalar Floating-point Absolute Difference
5603 defm FABD: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11010, "fabd">;
5604 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vabd, f32, f32,
5605                                          FABDsss, f64, f64, FABDddd>;
5606
5607 // Scalar Absolute Value
5608 defm ABS : NeonI_Scalar2SameMisc_D_size<0b0, 0b01011, "abs">;
5609 defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vabs, ABSdd>;
5610
5611 // Scalar Signed Saturating Absolute Value
5612 defm SQABS : NeonI_Scalar2SameMisc_BHSD_size<0b0, 0b00111, "sqabs">;
5613 defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqabs,
5614                                                SQABSbb, SQABShh, SQABSss, SQABSdd>;
5615
5616 // Scalar Negate
5617 defm NEG : NeonI_Scalar2SameMisc_D_size<0b1, 0b01011, "neg">;
5618 defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vneg, NEGdd>;
5619
5620 // Scalar Signed Saturating Negate
5621 defm SQNEG : NeonI_Scalar2SameMisc_BHSD_size<0b1, 0b00111, "sqneg">;
5622 defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqneg,
5623                                                SQNEGbb, SQNEGhh, SQNEGss, SQNEGdd>;
5624
5625 // Scalar Signed Saturating Accumulate of Unsigned Value
5626 defm SUQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b0, 0b00011, "suqadd">;
5627 defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vuqadd,
5628                                                      SUQADDbb, SUQADDhh,
5629                                                      SUQADDss, SUQADDdd>;
5630
5631 // Scalar Unsigned Saturating Accumulate of Signed Value
5632 defm USQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b1, 0b00011, "usqadd">;
5633 defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vsqadd,
5634                                                      USQADDbb, USQADDhh,
5635                                                      USQADDss, USQADDdd>;
5636
5637 def : Pat<(v1i64 (int_aarch64_neon_suqadd (v1i64 FPR64:$Src),
5638                                           (v1i64 FPR64:$Rn))),
5639           (SUQADDdd FPR64:$Src, FPR64:$Rn)>;
5640
5641 def : Pat<(v1i64 (int_aarch64_neon_usqadd (v1i64 FPR64:$Src),
5642                                           (v1i64 FPR64:$Rn))),
5643           (USQADDdd FPR64:$Src, FPR64:$Rn)>;
5644
5645 def : Pat<(v1i64 (int_arm_neon_vabs (v1i64 FPR64:$Rn))),
5646           (ABSdd FPR64:$Rn)>;
5647
5648 def : Pat<(v1i64 (int_arm_neon_vqabs (v1i64 FPR64:$Rn))),
5649           (SQABSdd FPR64:$Rn)>;
5650
5651 def : Pat<(v1i64 (int_arm_neon_vqneg (v1i64 FPR64:$Rn))),
5652           (SQNEGdd FPR64:$Rn)>;
5653
5654 def : Pat<(v1i64 (sub (v1i64 (bitconvert (v8i8 Neon_AllZero))),
5655                       (v1i64 FPR64:$Rn))),
5656           (NEGdd FPR64:$Rn)>;
5657
5658 // Scalar Signed Saturating Extract Unsigned Narrow
5659 defm SQXTUN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10010, "sqxtun">;
5660 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnsu,
5661                                                      SQXTUNbh, SQXTUNhs,
5662                                                      SQXTUNsd>;
5663
5664 // Scalar Signed Saturating Extract Narrow
5665 defm SQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b0, 0b10100, "sqxtn">;
5666 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovns,
5667                                                      SQXTNbh, SQXTNhs,
5668                                                      SQXTNsd>;
5669
5670 // Scalar Unsigned Saturating Extract Narrow
5671 defm UQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10100, "uqxtn">;
5672 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnu,
5673                                                      UQXTNbh, UQXTNhs,
5674                                                      UQXTNsd>;
5675
5676 // Scalar Reduce Pairwise
5677
5678 multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
5679                                      string asmop, bit Commutable = 0> {
5680   let isCommutable = Commutable in {
5681     def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
5682                                 (outs FPR64:$Rd), (ins VPR128:$Rn),
5683                                 !strconcat(asmop, "\t$Rd, $Rn.2d"),
5684                                 [],
5685                                 NoItinerary>,
5686                 Sched<[WriteFPALU, ReadFPALU]>;
5687   }
5688 }
5689
5690 multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
5691                                      string asmop, bit Commutable = 0>
5692   : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
5693   let isCommutable = Commutable in {
5694     def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
5695                                 (outs FPR32:$Rd), (ins VPR64:$Rn),
5696                                 !strconcat(asmop, "\t$Rd, $Rn.2s"),
5697                                 [],
5698                                 NoItinerary>,
5699                 Sched<[WriteFPALU, ReadFPALU]>;
5700   }
5701 }
5702
5703 // Scalar Reduce Addition Pairwise (Integer) with
5704 // patterns to match the llvm.aarch64.* intrinsic
5705 defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
5706
5707 // Pattern to match llvm.aarch64.* intrinsic for
5708 // Scalar Reduce Addition Pairwise (Integer)
5709 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
5710           (ADDPvv_D_2D VPR128:$Rn)>;
5711 def : Pat<(v1i64 (int_aarch64_neon_vaddv (v2i64 VPR128:$Rn))),
5712           (ADDPvv_D_2D VPR128:$Rn)>;
5713
5714 // Scalar Reduce Addition Pairwise (Floating Point)
5715 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
5716
5717 // Scalar Reduce Maximum Pairwise (Floating Point)
5718 defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
5719
5720 // Scalar Reduce Minimum Pairwise (Floating Point)
5721 defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
5722
5723 // Scalar Reduce maxNum Pairwise (Floating Point)
5724 defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
5725
5726 // Scalar Reduce minNum Pairwise (Floating Point)
5727 defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
5728
5729 multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnode,
5730                                             Instruction INSTS,
5731                                             Instruction INSTD> {
5732   def : Pat<(f32 (opnode (v2f32 VPR64:$Rn))),
5733             (INSTS VPR64:$Rn)>;
5734   def : Pat<(f64 (opnode (v2f64 VPR128:$Rn))),
5735             (INSTD VPR128:$Rn)>;
5736 }
5737
5738 // Patterns to match llvm.aarch64.* intrinsic for
5739 // Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
5740 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
5741                                         FADDPvv_S_2S, FADDPvv_D_2D>;
5742
5743 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
5744                                         FMAXPvv_S_2S, FMAXPvv_D_2D>;
5745
5746 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
5747                                         FMINPvv_S_2S, FMINPvv_D_2D>;
5748
5749 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
5750                                         FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
5751
5752 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
5753                                         FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
5754
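// A v4f32 pairwise-add reduction is handled by folding the vector onto itself
// with FADDP.4S first, then adding the two low elements of the result with
// the scalar faddp.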
5755 def : Pat<(f32 (int_aarch64_neon_vpfadd (v4f32 VPR128:$Rn))),
5756           (FADDPvv_S_2S (v2f32
5757                (EXTRACT_SUBREG
5758                    (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
5759                    sub_64)))>;
5760
5761 // Scalar by element Arithmetic
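// $Imm selects the lane of the $MRm vector operand; the concrete instruction
// definitions below split it across the h and l bits and place $MRm in the
// Rm field according to the element size.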
5762
5763 class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,
5764                                     string rmlane, bit u, bit szhi, bit szlo,
5765                                     RegisterClass ResFPR, RegisterClass OpFPR,
5766                                     RegisterOperand OpVPR, Operand OpImm>
5767   : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
5768                              (outs ResFPR:$Rd),
5769                              (ins OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
5770                              asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
5771                              [],
5772                              NoItinerary>,
5773     Sched<[WriteFPMul, ReadFPMul, ReadFPMul]> {
5774   bits<3> Imm;
5775   bits<5> MRm;
5776 }
5777
5778 class NeonI_ScalarXIndexedElemArith_Constraint_Impl<string asmop, bits<4> opcode,
5779                                                     string rmlane,
5780                                                     bit u, bit szhi, bit szlo,
5781                                                     RegisterClass ResFPR,
5782                                                     RegisterClass OpFPR,
5783                                                     RegisterOperand OpVPR,
5784                                                     Operand OpImm>
5785   : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
5786                              (outs ResFPR:$Rd),
5787                              (ins ResFPR:$src, OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
5788                              asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
5789                              [],
5790                              NoItinerary>,
5791     Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
5792   let Constraints = "$src = $Rd";
5793   bits<3> Imm;
5794   bits<5> MRm;
5795 }
5796
5797 // Scalar Floating Point multiply (scalar, by element)
5798 def FMULssv_4S : NeonI_ScalarXIndexedElemArith<"fmul",
5799   0b1001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5800   let Inst{11} = Imm{1}; // h
5801   let Inst{21} = Imm{0}; // l
5802   let Inst{20-16} = MRm;
5803 }
5804 def FMULddv_2D : NeonI_ScalarXIndexedElemArith<"fmul",
5805   0b1001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5806   let Inst{11} = Imm{0}; // h
5807   let Inst{21} = 0b0;    // l
5808   let Inst{20-16} = MRm;
5809 }
5810
5811 // Scalar Floating Point multiply extended (scalar, by element)
5812 def FMULXssv_4S : NeonI_ScalarXIndexedElemArith<"fmulx",
5813   0b1001, ".s", 0b1, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5814   let Inst{11} = Imm{1}; // h
5815   let Inst{21} = Imm{0}; // l
5816   let Inst{20-16} = MRm;
5817 }
5818 def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
5819   0b1001, ".d", 0b1, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5820   let Inst{11} = Imm{0}; // h
5821   let Inst{21} = 0b0;    // l
5822   let Inst{20-16} = MRm;
5823 }
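// For illustration, these select one lane of the 128-bit $MRm operand, e.g.
// (illustrative registers):
//   fmul  s0, s1, v2.s[3]
//   fmulx d0, d1, v2.d[1]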
5824
5825 multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
5826   SDPatternOperator opnode,
5827   Instruction INST,
5828   ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
5829   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
5830
5831   def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
5832                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
5833              (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5834
5835   def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
5836                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
5837              (ResTy (INST (ResTy FPRC:$Rn),
5838                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5839                OpNImm:$Imm))>;
5840
5841   // swapped operands
5842   def  : Pat<(ResTy (opnode
5843                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5844                (ResTy FPRC:$Rn))),
5845              (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5846
5847   def  : Pat<(ResTy (opnode
5848                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5849                (ResTy FPRC:$Rn))),
5850              (ResTy (INST (ResTy FPRC:$Rn),
5851                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5852                OpNImm:$Imm))>;
5853 }
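// As a sketch, the FMULssv_4S instantiation below roughly yields patterns of
// the form
//   (fmul FPR32:$Rn, (vector_extract (v4f32 VPR128:$MRm), $Imm))
//     ==> (FMULssv_4S FPR32:$Rn, VPR128:$MRm, $Imm)
// together with the v2f32 (64-bit source) and operand-swapped variants.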
5854
5855 // Patterns for Scalar Floating Point multiply (scalar, by element)
5856 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
5857   f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
5858 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
5859   f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
5860
5861 // Patterns for Scalar Floating Point multiply extended (scalar, by element)
5862 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
5863   FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare,
5864   v2f32, v4f32, neon_uimm1_bare>;
5865 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
5866   FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare,
5867   v1f64, v2f64, neon_uimm0_bare>;
5868
5869 // Scalar Floating Point fused multiply-add (scalar, by element)
5870 def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
5871   0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5872   let Inst{11} = Imm{1}; // h
5873   let Inst{21} = Imm{0}; // l
5874   let Inst{20-16} = MRm;
5875 }
5876 def FMLAddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
5877   0b0001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5878   let Inst{11} = Imm{0}; // h
5879   let Inst{21} = 0b0;    // l
5880   let Inst{20-16} = MRm;
5881 }
5882
5883 // Scalar Floating Point fused multiply-subtract (scalar, by element)
5884 def FMLSssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
5885   0b0101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5886   let Inst{11} = Imm{1}; // h
5887   let Inst{21} = Imm{0}; // l
5888   let Inst{20-16} = MRm;
5889 }
5890 def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
5891   0b0101, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5892   let Inst{11} = Imm{0}; // h
5893   let Inst{21} = 0b0;    // l
5894   let Inst{20-16} = MRm;
5895 }
5896 // We are allowed to match the fma instruction regardless of compile options.
5897 multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
5898   Instruction FMLAI, Instruction FMLSI,
5899   ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
5900   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
5901   // fmla
5902   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5903                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5904                (ResTy FPRC:$Ra))),
5905              (ResTy (FMLAI (ResTy FPRC:$Ra),
5906                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5907
5908   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5909                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5910                (ResTy FPRC:$Ra))),
5911              (ResTy (FMLAI (ResTy FPRC:$Ra),
5912                (ResTy FPRC:$Rn),
5913                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5914                OpNImm:$Imm))>;
5915
5916   // swapped fmla operands
5917   def  : Pat<(ResTy (fma
5918                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5919                (ResTy FPRC:$Rn),
5920                (ResTy FPRC:$Ra))),
5921              (ResTy (FMLAI (ResTy FPRC:$Ra),
5922                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5923
5924   def  : Pat<(ResTy (fma
5925                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5926                (ResTy FPRC:$Rn),
5927                (ResTy FPRC:$Ra))),
5928              (ResTy (FMLAI (ResTy FPRC:$Ra),
5929                (ResTy FPRC:$Rn),
5930                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5931                OpNImm:$Imm))>;
5932
5933   // fmls
5934   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5935                (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
5936                (ResTy FPRC:$Ra))),
5937              (ResTy (FMLSI (ResTy FPRC:$Ra),
5938                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5939
5940   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5941                (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
5942                (ResTy FPRC:$Ra))),
5943              (ResTy (FMLSI (ResTy FPRC:$Ra),
5944                (ResTy FPRC:$Rn),
5945                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5946                OpNImm:$Imm))>;
5947
5948   // swapped fmls operands
5949   def  : Pat<(ResTy (fma
5950                (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
5951                (ResTy FPRC:$Rn),
5952                (ResTy FPRC:$Ra))),
5953              (ResTy (FMLSI (ResTy FPRC:$Ra),
5954                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5955
5956   def  : Pat<(ResTy (fma
5957                (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
5958                (ResTy FPRC:$Rn),
5959                (ResTy FPRC:$Ra))),
5960              (ResTy (FMLSI (ResTy FPRC:$Ra),
5961                (ResTy FPRC:$Rn),
5962                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5963                OpNImm:$Imm))>;
5964 }
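// Note that there is no separate fmls node: a by-element fmls is matched as
// fma with fneg on the multiplicand, roughly
//   fma($Rn, -$MRm[lane], $Ra)  ==>  (FMLSI $Ra, $Rn, $MRm, lane)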
5965
5966 // Scalar Floating Point fused multiply-add and
5967 // multiply-subtract (scalar, by element)
5968 defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
5969   f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
5970 defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
5971   f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
5974
5975 // Scalar Signed saturating doubling multiply long (scalar, by element)
5976 def SQDMULLshv_4H : NeonI_ScalarXIndexedElemArith<"sqdmull",
5977   0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
5978   let Inst{11} = 0b0; // h
5979   let Inst{21} = Imm{1}; // l
5980   let Inst{20} = Imm{0}; // m
5981   let Inst{19-16} = MRm{3-0};
5982 }
5983 def SQDMULLshv_8H : NeonI_ScalarXIndexedElemArith<"sqdmull",
5984   0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
5985   let Inst{11} = Imm{2}; // h
5986   let Inst{21} = Imm{1}; // l
5987   let Inst{20} = Imm{0}; // m
5988   let Inst{19-16} = MRm{3-0};
5989 }
5990 def SQDMULLdsv_2S : NeonI_ScalarXIndexedElemArith<"sqdmull",
5991   0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
5992   let Inst{11} = 0b0;    // h
5993   let Inst{21} = Imm{0}; // l
5994   let Inst{20-16} = MRm;
5995 }
5996 def SQDMULLdsv_4S : NeonI_ScalarXIndexedElemArith<"sqdmull",
5997   0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
5998   let Inst{11} = Imm{1};    // h
5999   let Inst{21} = Imm{0};    // l
6000   let Inst{20-16} = MRm;
6001 }
6002
6003 multiclass Neon_ScalarXIndexedElem_MUL_Patterns<
6004   SDPatternOperator opnode,
6005   Instruction INST,
6006   ValueType ResTy, RegisterClass FPRC,
6007   ValueType OpVTy, ValueType OpTy,
6008   ValueType VecOpTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
6009
6010   def  : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
6011                (OpVTy (scalar_to_vector
6012                  (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))))),
6013              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
6014
6015   def  : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
6016                (OpVTy (extract_subvector (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
6017              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
6018
6019   // swapped operands
6020   def  : Pat<(ResTy (opnode
6021                (OpVTy (scalar_to_vector
6022                  (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
6023                  (OpVTy FPRC:$Rn))),
6024              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
6025
6026   def  : Pat<(ResTy (opnode
6027                (OpVTy (extract_subvector (VecOpTy VPRC:$MRm), OpImm:$Imm)),
6028                (OpVTy FPRC:$Rn))),
6029              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
6030 }
6031
6032
6033 // Patterns for Scalar Signed saturating doubling
6034 // multiply long (scalar, by element)
6035 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
6036   SQDMULLshv_4H, v1i32, FPR16, v1i16, i16, v4i16,
6037   i32, VPR64Lo, neon_uimm2_bare>;
6038 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
6039   SQDMULLshv_8H, v1i32, FPR16, v1i16, i16, v8i16,
6040   i32, VPR128Lo, neon_uimm3_bare>;
6041 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
6042   SQDMULLdsv_2S, v1i64, FPR32, v1i32, i32, v2i32,
6043   i32, VPR64Lo, neon_uimm1_bare>;
6044 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
6045   SQDMULLdsv_4S, v1i64, FPR32, v1i32, i32, v4i32,
6046   i32, VPR128Lo, neon_uimm2_bare>;
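// For illustration, the widening by-element form reads a 16-bit or 32-bit
// lane and produces a 32-bit or 64-bit scalar result, e.g.:
//   sqdmull s0, h1, v2.h[3]
//   sqdmull d0, s1, v2.s[1]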
6047
6048 // Scalar Signed saturating doubling multiply-add long (scalar, by element)
6049 def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
6050   0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
6051   let Inst{11} = 0b0; // h
6052   let Inst{21} = Imm{1}; // l
6053   let Inst{20} = Imm{0}; // m
6054   let Inst{19-16} = MRm{3-0};
6055 }
6056 def SQDMLALshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
6057   0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
6058   let Inst{11} = Imm{2}; // h
6059   let Inst{21} = Imm{1}; // l
6060   let Inst{20} = Imm{0}; // m
6061   let Inst{19-16} = MRm{3-0};
6062 }
6063 def SQDMLALdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
6064   0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
6065   let Inst{11} = 0b0;    // h
6066   let Inst{21} = Imm{0}; // l
6067   let Inst{20-16} = MRm;
6068 }
6069 def SQDMLALdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
6070   0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
6071   let Inst{11} = Imm{1};    // h
6072   let Inst{21} = Imm{0};    // l
6073   let Inst{20-16} = MRm;
6074 }
6075
6076 // Scalar Signed saturating doubling
6077 // multiply-subtract long (scalar, by element)
6078 def SQDMLSLshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
6079   0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
6080   let Inst{11} = 0b0; // h
6081   let Inst{21} = Imm{1}; // l
6082   let Inst{20} = Imm{0}; // m
6083   let Inst{19-16} = MRm{3-0};
6084 }
6085 def SQDMLSLshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
6086   0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
6087   let Inst{11} = Imm{2}; // h
6088   let Inst{21} = Imm{1}; // l
6089   let Inst{20} = Imm{0}; // m
6090   let Inst{19-16} = MRm{3-0};
6091 }
6092 def SQDMLSLdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
6093   0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
6094   let Inst{11} = 0b0;    // h
6095   let Inst{21} = Imm{0}; // l
6096   let Inst{20-16} = MRm;
6097 }
6098 def SQDMLSLdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
6099   0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
6100   let Inst{11} = Imm{1};    // h
6101   let Inst{21} = Imm{0};    // l
6102   let Inst{20-16} = MRm;
6103 }
6104
6105 multiclass Neon_ScalarXIndexedElem_MLAL_Patterns<
6106   SDPatternOperator opnode,
6107   SDPatternOperator coreopnode,
6108   Instruction INST,
6109   ValueType ResTy, RegisterClass ResFPRC, RegisterClass FPRC,
6110   ValueType OpTy,
6111   ValueType OpVTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
6112
6113   def  : Pat<(ResTy (opnode
6114                (ResTy ResFPRC:$Ra),
6115                (ResTy (coreopnode (OpTy FPRC:$Rn),
6116                  (OpTy (scalar_to_vector
6117                    (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))))))),
6118              (ResTy (INST (ResTy ResFPRC:$Ra),
6119                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
6120
6121   def  : Pat<(ResTy (opnode
6122                (ResTy ResFPRC:$Ra),
6123                (ResTy (coreopnode (OpTy FPRC:$Rn),
6124                  (OpTy (extract_subvector (OpVTy VPRC:$MRm), OpImm:$Imm)))))),
6125              (ResTy (INST (ResTy ResFPRC:$Ra),
6126                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
6127
6128   // swapped operands
6129   def  : Pat<(ResTy (opnode
6130                (ResTy ResFPRC:$Ra),
6131                (ResTy (coreopnode
6132                  (OpTy (scalar_to_vector
6133                    (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))),
6134                  (OpTy FPRC:$Rn))))),
6135              (ResTy (INST (ResTy ResFPRC:$Ra),
6136                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
6137
6138   def  : Pat<(ResTy (opnode
6139                (ResTy ResFPRC:$Ra),
6140                (ResTy (coreopnode
6141                  (OpTy (extract_subvector (OpVTy VPRC:$MRm), OpImm:$Imm)),
6142                  (OpTy FPRC:$Rn))))),
6143              (ResTy (INST (ResTy ResFPRC:$Ra),
6144                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
6145 }
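// As a sketch, the instantiations below compose the saturating add/subtract
// intrinsic with vqdmull, roughly
//   opnode($Ra, vqdmull($Rn, $MRm[lane]))  ==>  (INST $Ra, $Rn, $MRm, lane)
// which is how sqdmlal/sqdmlsl accumulate into the existing value in $Ra.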
6146
6147 // Patterns for Scalar Signed saturating
6148 // doubling multiply-add long (scalar, by element)
6149 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
6150   int_arm_neon_vqdmull, SQDMLALshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
6151   i32, VPR64Lo, neon_uimm2_bare>;
6152 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
6153   int_arm_neon_vqdmull, SQDMLALshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
6154   i32, VPR128Lo, neon_uimm3_bare>;
6155 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
6156   int_arm_neon_vqdmull, SQDMLALdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
6157   i32, VPR64Lo, neon_uimm1_bare>;
6158 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
6159   int_arm_neon_vqdmull, SQDMLALdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
6160   i32, VPR128Lo, neon_uimm2_bare>;
6161
6162 // Patterns for Scalar Signed saturating
6163 // doubling multiply-subtract long (scalar, by element)
6164 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
6165   int_arm_neon_vqdmull, SQDMLSLshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
6166   i32, VPR64Lo, neon_uimm2_bare>;
6167 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
6168   int_arm_neon_vqdmull, SQDMLSLshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
6169   i32, VPR128Lo, neon_uimm3_bare>;
6170 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
6171   int_arm_neon_vqdmull, SQDMLSLdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
6172   i32, VPR64Lo, neon_uimm1_bare>;
6173 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
6174   int_arm_neon_vqdmull, SQDMLSLdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
6175   i32, VPR128Lo, neon_uimm2_bare>;
6176
6177 // Scalar Signed saturating doubling multiply returning
6178 // high half (scalar, by element)
6179 def SQDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
6180   0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
6181   let Inst{11} = 0b0; // h
6182   let Inst{21} = Imm{1}; // l
6183   let Inst{20} = Imm{0}; // m
6184   let Inst{19-16} = MRm{3-0};
6185 }
6186 def SQDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
6187   0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
6188   let Inst{11} = Imm{2}; // h
6189   let Inst{21} = Imm{1}; // l
6190   let Inst{20} = Imm{0}; // m
6191   let Inst{19-16} = MRm{3-0};
6192 }
6193 def SQDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
6194   0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
6195   let Inst{11} = 0b0;    // h
6196   let Inst{21} = Imm{0}; // l
6197   let Inst{20-16} = MRm;
6198 }
6199 def SQDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
6200   0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
6201   let Inst{11} = Imm{1};    // h
6202   let Inst{21} = Imm{0};    // l
6203   let Inst{20-16} = MRm;
6204 }
6205
6206 // Patterns for Scalar Signed saturating doubling multiply returning
6207 // high half (scalar, by element)
6208 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
6209   SQDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16,
6210   i32, VPR64Lo, neon_uimm2_bare>;
6211 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
6212   SQDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16,
6213   i32, VPR128Lo, neon_uimm3_bare>;
6214 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
6215   SQDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32,
6216   i32, VPR64Lo, neon_uimm1_bare>;
6217 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
6218   SQDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32,
6219   i32, VPR128Lo, neon_uimm2_bare>;
6220
6221 // Scalar Signed saturating rounding doubling multiply
6222 // returning high half (scalar, by element)
6223 def SQRDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
6224   0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
6225   let Inst{11} = 0b0; // h
6226   let Inst{21} = Imm{1}; // l
6227   let Inst{20} = Imm{0}; // m
6228   let Inst{19-16} = MRm{3-0};
6229 }
6230 def SQRDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
6231   0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
6232   let Inst{11} = Imm{2}; // h
6233   let Inst{21} = Imm{1}; // l
6234   let Inst{20} = Imm{0}; // m
6235   let Inst{19-16} = MRm{3-0};
6236 }
6237 def SQRDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
6238   0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
6239   let Inst{11} = 0b0;    // h
6240   let Inst{21} = Imm{0}; // l
6241   let Inst{20-16} = MRm;
6242 }
6243 def SQRDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
6244   0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
6245   let Inst{11} = Imm{1};    // h
6246   let Inst{21} = Imm{0};    // l
6247   let Inst{20-16} = MRm;
6248 }
6249
6250 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
6251   SQRDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16, i32,
6252   VPR64Lo, neon_uimm2_bare>;
6253 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
6254   SQRDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16, i32,
6255   VPR128Lo, neon_uimm3_bare>;
6256 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
6257   SQRDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32, i32,
6258   VPR64Lo, neon_uimm1_bare>;
6259 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
6260   SQRDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32, i32,
6261   VPR128Lo, neon_uimm2_bare>;
6262
6263 // Scalar general arithmetic operation
6264 class Neon_Scalar_GeneralMath2D_pattern<SDPatternOperator opnode,
6265                                         Instruction INST> 
6266     : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
6267
6268 class Neon_Scalar_GeneralMath3D_pattern<SDPatternOperator opnode,
6269                                         Instruction INST> 
6270     : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
6271           (INST FPR64:$Rn, FPR64:$Rm)>;
6272
6273 class Neon_Scalar_GeneralMath4D_pattern<SDPatternOperator opnode,
6274                                         Instruction INST> 
6275     : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm),
6276               (v1f64 FPR64:$Ra))),
6277           (INST FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
6278
6279 def : Neon_Scalar_GeneralMath3D_pattern<fadd, FADDddd>;
6280 def : Neon_Scalar_GeneralMath3D_pattern<fmul, FMULddd>;
6281 def : Neon_Scalar_GeneralMath3D_pattern<fsub, FSUBddd>;
6282 def : Neon_Scalar_GeneralMath3D_pattern<fdiv, FDIVddd>;
6283 def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vabds, FABDddd>;
6284 def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmaxs, FMAXddd>;
6285 def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmins, FMINddd>;
6286 def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vmaxnm, FMAXNMddd>;
6287 def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vminnm, FMINNMddd>;
6288
6289 def : Neon_Scalar_GeneralMath2D_pattern<fabs, FABSdd>;
6290 def : Neon_Scalar_GeneralMath2D_pattern<fneg, FNEGdd>;
6291
6292 def : Neon_Scalar_GeneralMath4D_pattern<fma, FMADDdddd>;
6293 def : Neon_Scalar_GeneralMath4D_pattern<fmsub, FMSUBdddd>;
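// For illustration, these map v1f64 operations directly onto the ordinary
// scalar FP instructions, e.g. (illustrative registers):
//   (v1f64 (fadd $Rn, $Rm))      ==>  fadd d0, d1, d2
//   (v1f64 (fma $Rn, $Rm, $Ra))  ==>  fmadd d0, d1, d2, d3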
6294
6295 // Scalar Copy - DUP element to scalar
6296 class NeonI_Scalar_DUP<string asmop, string asmlane,
6297                        RegisterClass ResRC, RegisterOperand VPRC,
6298                        Operand OpImm>
6299   : NeonI_ScalarCopy<(outs ResRC:$Rd), (ins VPRC:$Rn, OpImm:$Imm),
6300                      asmop # "\t$Rd, $Rn." # asmlane # "[$Imm]",
6301                      [],
6302                      NoItinerary>,
6303     Sched<[WriteFPALU, ReadFPALU]> {
6304   bits<4> Imm;
6305 }
6306
6307 def DUPbv_B : NeonI_Scalar_DUP<"dup", "b", FPR8, VPR128, neon_uimm4_bare> {
6308   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6309 }
6310 def DUPhv_H : NeonI_Scalar_DUP<"dup", "h", FPR16, VPR128, neon_uimm3_bare> {
6311   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6312 }
6313 def DUPsv_S : NeonI_Scalar_DUP<"dup", "s", FPR32, VPR128, neon_uimm2_bare> {
6314   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6315 }
6316 def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
6317   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
6318 }
6319
6320 def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 0)),
6321           (f32 (EXTRACT_SUBREG (v4f32 VPR128:$Rn), sub_32))>;
6322 def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 1)),
6323           (f32 (DUPsv_S (v4f32 VPR128:$Rn), 1))>;
6324 def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 2)),
6325           (f32 (DUPsv_S (v4f32 VPR128:$Rn), 2))>;
6326 def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 3)),
6327           (f32 (DUPsv_S (v4f32 VPR128:$Rn), 3))>;
6328
6329 def : Pat<(f64 (vector_extract (v2f64 VPR128:$Rn), 0)),
6330           (f64 (EXTRACT_SUBREG (v2f64 VPR128:$Rn), sub_64))>;
6331 def : Pat<(f64 (vector_extract (v2f64 VPR128:$Rn), 1)),
6332           (f64 (DUPdv_D (v2f64 VPR128:$Rn), 1))>;
6333
6334 def : Pat<(f32 (vector_extract (v2f32 VPR64:$Rn), 0)),
6335           (f32 (EXTRACT_SUBREG (v2f32 VPR64:$Rn), sub_32))>;
6336 def : Pat<(f32 (vector_extract (v2f32 VPR64:$Rn), 1)),
6337           (f32 (DUPsv_S (v4f32 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6338             1))>;
6339
6340 def : Pat<(f64 (vector_extract (v1f64 VPR64:$Rn), 0)),
6341           (f64 (EXTRACT_SUBREG (v1f64 VPR64:$Rn), sub_64))>;
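// Summarising the patterns above: extracting lane 0 of a floating-point
// vector is a plain sub-register copy, while any other lane is materialised
// with a scalar DUP, e.g. lane 2 of a v4f32 becomes "dup s0, v1.s[2]".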
6342
6343 multiclass NeonI_Scalar_DUP_Ext_Vec_pattern<Instruction DUPI,
6344   ValueType ResTy, ValueType OpTy,Operand OpLImm,
6345   ValueType NOpTy, ValueType ExTy, Operand OpNImm> {
6346
6347   def : Pat<(ResTy (extract_subvector (OpTy VPR128:$Rn), OpLImm:$Imm)),
6348             (ResTy (DUPI VPR128:$Rn, OpLImm:$Imm))>;
6349
6350   def : Pat<(ResTy (extract_subvector (NOpTy VPR64:$Rn), OpNImm:$Imm)),
6351             (ResTy (DUPI
6352               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6353                 OpNImm:$Imm))>;
6354 }
6355
6356 // Patterns for extracting v1ix subvectors using scalar DUP instructions.
6357 defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPbv_B, v1i8, v16i8, neon_uimm4_bare,
6358                                         v8i8, v16i8, neon_uimm3_bare>;
6359 defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPhv_H, v1i16, v8i16, neon_uimm3_bare,
6360                                         v4i16, v8i16, neon_uimm2_bare>;
6361 defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPsv_S, v1i32, v4i32, neon_uimm2_bare,
6362                                         v2i32, v4i32, neon_uimm1_bare>;
6363
6364 multiclass NeonI_Scalar_DUP_Copy_pattern1<Instruction DUPI, ValueType ResTy,
6365                                           ValueType OpTy, ValueType ElemTy,
6366                                           Operand OpImm, ValueType OpNTy,
6367                                           ValueType ExTy, Operand OpNImm> {
6368
6369   def : Pat<(ResTy (vector_insert (ResTy undef),
6370               (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
6371               (neon_uimm0_bare:$Imm))),
6372             (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
6373
6374   def : Pat<(ResTy (vector_insert (ResTy undef),
6375               (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
6376               (OpNImm:$Imm))),
6377             (ResTy (DUPI
6378               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6379               OpNImm:$Imm))>;
6380 }
6381
6382 multiclass NeonI_Scalar_DUP_Copy_pattern2<Instruction DUPI, ValueType ResTy,
6383                                           ValueType OpTy, ValueType ElemTy,
6384                                           Operand OpImm, ValueType OpNTy,
6385                                           ValueType ExTy, Operand OpNImm> {
6386
6387   def : Pat<(ResTy (scalar_to_vector
6388               (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)))),
6389             (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
6390
6391   def : Pat<(ResTy (scalar_to_vector
6392               (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)))),
6393             (ResTy (DUPI
6394               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6395               OpNImm:$Imm))>;
6396 }
6397
6398 // Patterns for vector copy to v1ix and v1fx vectors using scalar DUP
6399 // instructions.
6400 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPdv_D,
6401   v1i64, v2i64, i64, neon_uimm1_bare,
6402   v1i64, v2i64, neon_uimm0_bare>;
6403 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPsv_S,
6404   v1i32, v4i32, i32, neon_uimm2_bare,
6405   v2i32, v4i32, neon_uimm1_bare>;
6406 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPhv_H,
6407   v1i16, v8i16, i32, neon_uimm3_bare,
6408   v4i16, v8i16, neon_uimm2_bare>;
6409 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPbv_B,
6410   v1i8, v16i8, i32, neon_uimm4_bare,
6411   v8i8, v16i8, neon_uimm3_bare>;
6412 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
6413   v1i64, v2i64, i64, neon_uimm1_bare,
6414   v1i64, v2i64, neon_uimm0_bare>;
6415 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPsv_S,
6416   v1i32, v4i32, i32, neon_uimm2_bare,
6417   v2i32, v4i32, neon_uimm1_bare>;
6418 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPhv_H,
6419   v1i16, v8i16, i32, neon_uimm3_bare,
6420   v4i16, v8i16, neon_uimm2_bare>;
6421 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPbv_B,
6422   v1i8, v16i8, i32, neon_uimm4_bare,
6423   v8i8, v16i8, neon_uimm3_bare>;
6424
6425 multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
6426                                   Instruction DUPI, Operand OpImm,
6427                                   RegisterClass ResRC> {
6428   def : NeonInstAlias<!strconcat(asmop, "$Rd, $Rn" # asmlane # "[$Imm]"),
6429           (DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
6430 }
6431
6432 // Aliases for Scalar copy - DUP element (scalar)
6433 // FIXME: This is actually the preferred syntax but TableGen can't deal with
6434 // custom printing of aliases.
6435 defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
6436 defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
6437 defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
6438 defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
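// With these aliases the assembler also accepts, e.g., "mov s0, v2.s[1]" as a
// spelling of "dup s0, v2.s[1]"; the trailing 0b0 keeps dup as the form used
// when printing.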
6439
6440 multiclass NeonI_SDUP<PatFrag GetLow, PatFrag GetHigh, ValueType ResTy,
6441                       ValueType OpTy> {
6442   def : Pat<(ResTy (GetLow VPR128:$Rn)),
6443             (ResTy (EXTRACT_SUBREG (OpTy VPR128:$Rn), sub_64))>;
6444   def : Pat<(ResTy (GetHigh VPR128:$Rn)),
6445             (ResTy (DUPdv_D (OpTy VPR128:$Rn), 1))>;
6446 }
6447
6448 defm : NeonI_SDUP<Neon_Low16B, Neon_High16B, v8i8, v16i8>;
6449 defm : NeonI_SDUP<Neon_Low8H, Neon_High8H, v4i16, v8i16>;
6450 defm : NeonI_SDUP<Neon_Low4S, Neon_High4S, v2i32, v4i32>;
6451 defm : NeonI_SDUP<Neon_Low2D, Neon_High2D, v1i64, v2i64>;
6452 defm : NeonI_SDUP<Neon_Low4float, Neon_High4float, v2f32, v4f32>;
6453 defm : NeonI_SDUP<Neon_Low2double, Neon_High2double, v1f64, v2f64>;
6454
6455 // The following patterns sign/zero extend a v1ix value to the next wider v1ix type
6456 multiclass NeonI_ext<string prefix, SDNode ExtOp> {
6457   // v1i32 -> v1i64
6458   def : Pat<(v1i64 (ExtOp (v1i32 FPR32:$Rn))),
6459             (EXTRACT_SUBREG 
6460               (v2i64 (!cast<Instruction>(prefix # "_2S")
6461                 (v2i32 (SUBREG_TO_REG (i64 0), $Rn, sub_32)), 0)),
6462               sub_64)>;
6463   
6464   // v1i16 -> v1i32
6465   def : Pat<(v1i32 (ExtOp (v1i16 FPR16:$Rn))),
6466             (EXTRACT_SUBREG 
6467               (v4i32 (!cast<Instruction>(prefix # "_4H")
6468                 (v4i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)), 0)),
6469               sub_32)>;
6470   
6471   // v1i8 -> v1i16
6472   def : Pat<(v1i16 (ExtOp (v1i8 FPR8:$Rn))),
6473             (EXTRACT_SUBREG 
6474               (v8i16 (!cast<Instruction>(prefix # "_8B")
6475                 (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
6476               sub_16)>;
6477 }
6478
6479 defm NeonI_zext : NeonI_ext<"USHLLvvi", zext>;
6480 defm NeonI_sext : NeonI_ext<"SSHLLvvi", sext>;
6481
6482 // zext v1i8 -> v1i32
6483 def : Pat<(v1i32 (zext (v1i8 FPR8:$Rn))),
6484           (v1i32 (EXTRACT_SUBREG
6485             (v1i64 (SUBREG_TO_REG (i64 0),
6486               (v1i8 (DUPbv_B
6487                 (v16i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)),
6488                 0)),
6489               sub_8)),
6490             sub_32))>;
6491
6492 // zext v1i8 -> v1i64
6493 def : Pat<(v1i64 (zext (v1i8 FPR8:$Rn))),
6494           (v1i64 (SUBREG_TO_REG (i64 0),
6495             (v1i8 (DUPbv_B
6496               (v16i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)),
6497               0)),
6498             sub_8))>;
6499
6500 // zext v1i16 -> v1i64
6501 def : Pat<(v1i64 (zext (v1i16 FPR16:$Rn))),
6502           (v1i64 (SUBREG_TO_REG (i64 0),
6503             (v1i16 (DUPhv_H
6504               (v8i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)),
6505               0)),
6506             sub_16))>;
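// These zext patterns rely on scalar DUP zeroing the unwritten bits of its
// destination register, so no explicit mask is needed; e.g. zext v1i8 -> v1i64
// is roughly
//   dup b0, v1.b[0]
// with the full register then reinterpreted at the wider type.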
6507
6508 // sext v1i8 -> v1i32
6509 def : Pat<(v1i32 (sext (v1i8 FPR8:$Rn))),
6510           (EXTRACT_SUBREG
6511             (v4i32 (SSHLLvvi_4H
6512               (v4i16 (SUBREG_TO_REG (i64 0),
6513                 (v1i16 (EXTRACT_SUBREG 
6514                   (v8i16 (SSHLLvvi_8B
6515                     (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
6516                   sub_16)),
6517                 sub_16)), 0)),
6518             sub_32)>;
6519               
6520 // sext v1i8 -> v1i64
6521 def : Pat<(v1i64 (sext (v1i8 FPR8:$Rn))),
6522           (EXTRACT_SUBREG 
6523             (v2i64 (SSHLLvvi_2S
6524               (v2i32 (SUBREG_TO_REG (i64 0),
6525                 (v1i32 (EXTRACT_SUBREG
6526                   (v4i32 (SSHLLvvi_4H
6527                     (v4i16 (SUBREG_TO_REG (i64 0),
6528                       (v1i16 (EXTRACT_SUBREG 
6529                         (v8i16 (SSHLLvvi_8B
6530                           (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
6531                         sub_16)),
6532                       sub_16)), 0)),
6533                   sub_32)),
6534                 sub_32)), 0)),
6535             sub_64)>;
6536
6537   
6538 // sext v1i16 -> v1i64
6539 def : Pat<(v1i64 (sext (v1i16 FPR16:$Rn))),
6540           (EXTRACT_SUBREG
6541             (v2i64 (SSHLLvvi_2S
6542               (v2i32 (SUBREG_TO_REG (i64 0),
6543                 (v1i32 (EXTRACT_SUBREG 
6544                   (v4i32 (SSHLLvvi_4H
6545                     (v4i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)), 0)),
6546                   sub_32)),
6547                 sub_32)), 0)),
6548             sub_64)>;
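// For example, sext v1i16 -> v1i64 is selected as two widening shifts by #0,
// roughly (illustrative registers):
//   sshll v0.4s, v0.4h, #0
//   sshll v0.2d, v0.2s, #0
// with the result taken from the low 64 bits.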
6549
6550 //===----------------------------------------------------------------------===//
6551 // Non-Instruction Patterns
6552 //===----------------------------------------------------------------------===//
6553
6554 // 64-bit vector bitcasts...
6555
6556 def : Pat<(v1i64 (bitconvert (v8i8  VPR64:$src))), (v1i64 VPR64:$src)>;
6557 def : Pat<(v2f32 (bitconvert (v8i8  VPR64:$src))), (v2f32 VPR64:$src)>;
6558 def : Pat<(v2i32 (bitconvert (v8i8  VPR64:$src))), (v2i32 VPR64:$src)>;
6559 def : Pat<(v4i16 (bitconvert (v8i8  VPR64:$src))), (v4i16 VPR64:$src)>;
6560
6561 def : Pat<(v1i64 (bitconvert (v4i16  VPR64:$src))), (v1i64 VPR64:$src)>;
6562 def : Pat<(v2i32 (bitconvert (v4i16  VPR64:$src))), (v2i32 VPR64:$src)>;
6563 def : Pat<(v2f32 (bitconvert (v4i16  VPR64:$src))), (v2f32 VPR64:$src)>;
6564 def : Pat<(v8i8  (bitconvert (v4i16  VPR64:$src))), (v8i8 VPR64:$src)>;
6565
6566 def : Pat<(v1i64 (bitconvert (v2i32  VPR64:$src))), (v1i64 VPR64:$src)>;
6567 def : Pat<(v2f32 (bitconvert (v2i32  VPR64:$src))), (v2f32 VPR64:$src)>;
6568 def : Pat<(v4i16 (bitconvert (v2i32  VPR64:$src))), (v4i16 VPR64:$src)>;
6569 def : Pat<(v8i8  (bitconvert (v2i32  VPR64:$src))), (v8i8 VPR64:$src)>;
6570
6571 def : Pat<(v1i64 (bitconvert (v2f32  VPR64:$src))), (v1i64 VPR64:$src)>;
6572 def : Pat<(v2i32 (bitconvert (v2f32  VPR64:$src))), (v2i32 VPR64:$src)>;
6573 def : Pat<(v4i16 (bitconvert (v2f32  VPR64:$src))), (v4i16 VPR64:$src)>;
6574 def : Pat<(v8i8  (bitconvert (v2f32  VPR64:$src))), (v8i8 VPR64:$src)>;
6575
6576 def : Pat<(v2f32 (bitconvert (v1i64  VPR64:$src))), (v2f32 VPR64:$src)>;
6577 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
6578 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
6579 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
6580
6581 def : Pat<(v1i64 (bitconvert (v1f64  VPR64:$src))), (v1i64 VPR64:$src)>;
6582 def : Pat<(v2f32 (bitconvert (v1f64  VPR64:$src))), (v2f32 VPR64:$src)>;
6583 def : Pat<(v2i32 (bitconvert (v1f64  VPR64:$src))), (v2i32 VPR64:$src)>;
6584 def : Pat<(v4i16 (bitconvert (v1f64  VPR64:$src))), (v4i16 VPR64:$src)>;
6585 def : Pat<(v8i8 (bitconvert (v1f64  VPR64:$src))), (v8i8 VPR64:$src)>;
6586 def : Pat<(f64   (bitconvert (v1f64  VPR64:$src))), (f64 VPR64:$src)>;
6587
6588 def : Pat<(v1f64 (bitconvert (v1i64  VPR64:$src))), (v1f64 VPR64:$src)>;
6589 def : Pat<(v1f64 (bitconvert (v2f32  VPR64:$src))), (v1f64 VPR64:$src)>;
6590 def : Pat<(v1f64 (bitconvert (v2i32  VPR64:$src))), (v1f64 VPR64:$src)>;
6591 def : Pat<(v1f64 (bitconvert (v4i16  VPR64:$src))), (v1f64 VPR64:$src)>;
6592 def : Pat<(v1f64 (bitconvert (v8i8  VPR64:$src))), (v1f64 VPR64:$src)>;
6593 def : Pat<(v1f64 (bitconvert (f64  VPR64:$src))), (v1f64 VPR64:$src)>;
6594
6595 // ...and 128-bit vector bitcasts...
6596
6597 def : Pat<(v2f64 (bitconvert (v16i8  VPR128:$src))), (v2f64 VPR128:$src)>;
6598 def : Pat<(v2i64 (bitconvert (v16i8  VPR128:$src))), (v2i64 VPR128:$src)>;
6599 def : Pat<(v4f32 (bitconvert (v16i8  VPR128:$src))), (v4f32 VPR128:$src)>;
6600 def : Pat<(v4i32 (bitconvert (v16i8  VPR128:$src))), (v4i32 VPR128:$src)>;
6601 def : Pat<(v8i16 (bitconvert (v16i8  VPR128:$src))), (v8i16 VPR128:$src)>;
6602
6603 def : Pat<(v2f64 (bitconvert (v8i16  VPR128:$src))), (v2f64 VPR128:$src)>;
6604 def : Pat<(v2i64 (bitconvert (v8i16  VPR128:$src))), (v2i64 VPR128:$src)>;
6605 def : Pat<(v4i32 (bitconvert (v8i16  VPR128:$src))), (v4i32 VPR128:$src)>;
6606 def : Pat<(v4f32 (bitconvert (v8i16  VPR128:$src))), (v4f32 VPR128:$src)>;
6607 def : Pat<(v16i8 (bitconvert (v8i16  VPR128:$src))), (v16i8 VPR128:$src)>;
6608
6609 def : Pat<(v2f64 (bitconvert (v4i32  VPR128:$src))), (v2f64 VPR128:$src)>;
6610 def : Pat<(v2i64 (bitconvert (v4i32  VPR128:$src))), (v2i64 VPR128:$src)>;
6611 def : Pat<(v4f32 (bitconvert (v4i32  VPR128:$src))), (v4f32 VPR128:$src)>;
6612 def : Pat<(v8i16 (bitconvert (v4i32  VPR128:$src))), (v8i16 VPR128:$src)>;
6613 def : Pat<(v16i8 (bitconvert (v4i32  VPR128:$src))), (v16i8 VPR128:$src)>;
6614
6615 def : Pat<(v2f64 (bitconvert (v4f32  VPR128:$src))), (v2f64 VPR128:$src)>;
6616 def : Pat<(v2i64 (bitconvert (v4f32  VPR128:$src))), (v2i64 VPR128:$src)>;
6617 def : Pat<(v4i32 (bitconvert (v4f32  VPR128:$src))), (v4i32 VPR128:$src)>;
6618 def : Pat<(v8i16 (bitconvert (v4f32  VPR128:$src))), (v8i16 VPR128:$src)>;
6619 def : Pat<(v16i8 (bitconvert (v4f32  VPR128:$src))), (v16i8 VPR128:$src)>;
6620
6621 def : Pat<(v2f64 (bitconvert (v2i64  VPR128:$src))), (v2f64 VPR128:$src)>;
6622 def : Pat<(v4f32 (bitconvert (v2i64  VPR128:$src))), (v4f32 VPR128:$src)>;
6623 def : Pat<(v4i32 (bitconvert (v2i64  VPR128:$src))), (v4i32 VPR128:$src)>;
6624 def : Pat<(v8i16 (bitconvert (v2i64  VPR128:$src))), (v8i16 VPR128:$src)>;
6625 def : Pat<(v16i8 (bitconvert (v2i64  VPR128:$src))), (v16i8 VPR128:$src)>;
6626
6627 def : Pat<(v2i64 (bitconvert (v2f64  VPR128:$src))), (v2i64 VPR128:$src)>;
6628 def : Pat<(v4f32 (bitconvert (v2f64  VPR128:$src))), (v4f32 VPR128:$src)>;
6629 def : Pat<(v4i32 (bitconvert (v2f64  VPR128:$src))), (v4i32 VPR128:$src)>;
6630 def : Pat<(v8i16 (bitconvert (v2f64  VPR128:$src))), (v8i16 VPR128:$src)>;
6631 def : Pat<(v16i8 (bitconvert (v2f64  VPR128:$src))), (v16i8 VPR128:$src)>;
6632
6633 // ...and scalar bitcasts...
6634 def : Pat<(f16 (bitconvert (v1i16  FPR16:$src))), (f16 FPR16:$src)>;
6635 def : Pat<(f32 (bitconvert (v1i32  FPR32:$src))), (f32 FPR32:$src)>;
6636 def : Pat<(f64 (bitconvert (v1i64  FPR64:$src))), (f64 FPR64:$src)>;
6637 def : Pat<(f64 (bitconvert (v1f64  FPR64:$src))), (f64 FPR64:$src)>;
6638
6639 def : Pat<(i64 (bitconvert (v1i64  FPR64:$src))), (FMOVxd $src)>;
6640 def : Pat<(i64 (bitconvert (v1f64  FPR64:$src))), (FMOVxd $src)>;
6641 def : Pat<(i64 (bitconvert (v2i32  FPR64:$src))), (FMOVxd $src)>;
6642 def : Pat<(i64 (bitconvert (v2f32  FPR64:$src))), (FMOVxd $src)>;
6643 def : Pat<(i64 (bitconvert (v4i16  FPR64:$src))), (FMOVxd $src)>;
6644 def : Pat<(i64 (bitconvert (v8i8  FPR64:$src))), (FMOVxd $src)>;
6645
6646 def : Pat<(i32 (bitconvert (v1i32  FPR32:$src))), (FMOVws $src)>;
6647
6648 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
6649 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
6650 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
6651
6652 def : Pat<(f64   (bitconvert (v8i8  VPR64:$src))), (f64 VPR64:$src)>;
6653 def : Pat<(f64   (bitconvert (v4i16  VPR64:$src))), (f64 VPR64:$src)>;
6654 def : Pat<(f64   (bitconvert (v2i32  VPR64:$src))), (f64 VPR64:$src)>;
6655 def : Pat<(f64   (bitconvert (v2f32  VPR64:$src))), (f64 VPR64:$src)>;
6656 def : Pat<(f64   (bitconvert (v1i64  VPR64:$src))), (f64 VPR64:$src)>;
6657
6658 def : Pat<(f128  (bitconvert (v16i8  VPR128:$src))), (f128 VPR128:$src)>;
6659 def : Pat<(f128  (bitconvert (v8i16  VPR128:$src))), (f128 VPR128:$src)>;
6660 def : Pat<(f128  (bitconvert (v4i32  VPR128:$src))), (f128 VPR128:$src)>;
6661 def : Pat<(f128  (bitconvert (v2i64  VPR128:$src))), (f128 VPR128:$src)>;
6662 def : Pat<(f128  (bitconvert (v4f32  VPR128:$src))), (f128 VPR128:$src)>;
6663 def : Pat<(f128  (bitconvert (v2f64  VPR128:$src))), (f128 VPR128:$src)>;
6664
6665 def : Pat<(v1i16 (bitconvert (f16  FPR16:$src))), (v1i16 FPR16:$src)>;
6666 def : Pat<(v1i32 (bitconvert (f32  FPR32:$src))), (v1i32 FPR32:$src)>;
6667 def : Pat<(v1i64 (bitconvert (f64  FPR64:$src))), (v1i64 FPR64:$src)>;
6668 def : Pat<(v1f64 (bitconvert (f64  FPR64:$src))), (v1f64 FPR64:$src)>;
6669
6670 def : Pat<(v1i64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6671 def : Pat<(v1f64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6672 def : Pat<(v2i32 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6673 def : Pat<(v2f32 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6674 def : Pat<(v4i16 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6675 def : Pat<(v8i8 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
6676
6677 def : Pat<(v1i32 (bitconvert (i32  GPR32:$src))), (FMOVsw $src)>;
6678
6679 def : Pat<(v8i8   (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
6680 def : Pat<(v4i16  (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
6681 def : Pat<(v2i32  (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
6682 def : Pat<(v2f32  (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
6683 def : Pat<(v1i64  (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
6684
6685 def : Pat<(v16i8  (bitconvert (f128   FPR128:$src))), (v16i8 FPR128:$src)>;
6686 def : Pat<(v8i16  (bitconvert (f128   FPR128:$src))), (v8i16 FPR128:$src)>;
6687 def : Pat<(v4i32  (bitconvert (f128   FPR128:$src))), (v4i32 FPR128:$src)>;
6688 def : Pat<(v2i64  (bitconvert (f128   FPR128:$src))), (v2i64 FPR128:$src)>;
6689 def : Pat<(v4f32  (bitconvert (f128   FPR128:$src))), (v4f32 FPR128:$src)>;
6690 def : Pat<(v2f64  (bitconvert (f128   FPR128:$src))), (v2f64 FPR128:$src)>;
6691
6692 // Immediate index operands used by the bitwise extract instructions below
6693
6694 def neon_uimm3 : Operand<i64>,
6695                    ImmLeaf<i64, [{return Imm < 8;}]> {
6696   let ParserMatchClass = uimm3_asmoperand;
6697   let PrintMethod = "printUImmHexOperand";
6698 }
6699
6700 def neon_uimm4 : Operand<i64>,
6701                    ImmLeaf<i64, [{return Imm < 16;}]> {
6702   let ParserMatchClass = uimm4_asmoperand;
6703   let PrintMethod = "printUImmHexOperand";
6704 }
6705
6706 // Bitwise Extract
6707 class NeonI_Extract<bit q, bits<2> op2, string asmop,
6708                     string OpS, RegisterOperand OpVPR, Operand OpImm>
6709   : NeonI_BitExtract<q, op2, (outs OpVPR:$Rd),
6710                      (ins OpVPR:$Rn, OpVPR:$Rm, OpImm:$Index),
6711                      asmop # "\t$Rd." # OpS # ", $Rn." # OpS #
6712                      ", $Rm." # OpS # ", $Index",
6713                      [],
6714                      NoItinerary>,
6715     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>{
6716   bits<4> Index;
6717 }
6718
6719 def EXTvvvi_8b : NeonI_Extract<0b0, 0b00, "ext", "8b",
6720                                VPR64, neon_uimm3> {
6721   let Inst{14-11} = {0b0, Index{2}, Index{1}, Index{0}};
6722 }
6723
6724 def EXTvvvi_16b: NeonI_Extract<0b1, 0b00, "ext", "16b",
6725                                VPR128, neon_uimm4> {
6726   let Inst{14-11} = Index;
6727 }
6728
6729 class NI_Extract<ValueType OpTy, RegisterOperand OpVPR, Instruction INST,
6730                  Operand OpImm>
6731   : Pat<(OpTy (Neon_vextract (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm),
6732                                  (i64 OpImm:$Imm))),
6733               (INST OpVPR:$Rn, OpVPR:$Rm, OpImm:$Imm)>;
6734
6735 def : NI_Extract<v8i8,  VPR64,  EXTvvvi_8b,  neon_uimm3>;
6736 def : NI_Extract<v4i16, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6737 def : NI_Extract<v2i32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6738 def : NI_Extract<v1i64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6739 def : NI_Extract<v2f32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6740 def : NI_Extract<v1f64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
6741 def : NI_Extract<v16i8, VPR128, EXTvvvi_16b, neon_uimm4>;
6742 def : NI_Extract<v8i16, VPR128, EXTvvvi_16b, neon_uimm4>;
6743 def : NI_Extract<v4i32, VPR128, EXTvvvi_16b, neon_uimm4>;
6744 def : NI_Extract<v2i64, VPR128, EXTvvvi_16b, neon_uimm4>;
6745 def : NI_Extract<v4f32, VPR128, EXTvvvi_16b, neon_uimm4>;
6746 def : NI_Extract<v2f64, VPR128, EXTvvvi_16b, neon_uimm4>;
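// For illustration, Neon_vextract maps directly onto EXT, e.g. for the 64-bit
// forms (illustrative registers and index):
//   ext v0.8b, v1.8b, v2.8b, #3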
6747
6748 // Table lookup
6749 class NI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
6750              string asmop, string OpS, RegisterOperand OpVPR,
6751              RegisterOperand VecList>
6752   : NeonI_TBL<q, op2, len, op,
6753               (outs OpVPR:$Rd), (ins VecList:$Rn, OpVPR:$Rm),
6754               asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
6755               [],
6756               NoItinerary>,
6757     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
6758
6759 // The vectors in the lookup table are always 16b
6760 multiclass NI_TBL_pat<bits<2> len, bit op, string asmop, string List> {
6761   def _8b  : NI_TBL<0, 0b00, len, op, asmop, "8b", VPR64,
6762                     !cast<RegisterOperand>(List # "16B_operand")>;
6763
6764   def _16b : NI_TBL<1, 0b00, len, op, asmop, "16b", VPR128,
6765                     !cast<RegisterOperand>(List # "16B_operand")>;
6766 }
6767
6768 defm TBL1 : NI_TBL_pat<0b00, 0b0, "tbl", "VOne">;
6769 defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
6770 defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
6771 defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
6772
6773 // Table lookup extension
6774 class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
6775              string asmop, string OpS, RegisterOperand OpVPR,
6776              RegisterOperand VecList>
6777   : NeonI_TBL<q, op2, len, op,
6778               (outs OpVPR:$Rd), (ins OpVPR:$src, VecList:$Rn, OpVPR:$Rm),
6779               asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
6780               [],
6781               NoItinerary>,
6782     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
6783   let Constraints = "$src = $Rd";
6784 }
6785
6786 // The vectors in the lookup table are always 16b
6787 multiclass NI_TBX_pat<bits<2> len, bit op, string asmop, string List> {
6788   def _8b  : NI_TBX<0, 0b00, len, op, asmop, "8b", VPR64,
6789                     !cast<RegisterOperand>(List # "16B_operand")>;
6790
6791   def _16b : NI_TBX<1, 0b00, len, op, asmop, "16b", VPR128,
6792                     !cast<RegisterOperand>(List # "16B_operand")>;
6793 }
6794
6795 defm TBX1 : NI_TBX_pat<0b00, 0b1, "tbx", "VOne">;
6796 defm TBX2 : NI_TBX_pat<0b01, 0b1, "tbx", "VPair">;
6797 defm TBX3 : NI_TBX_pat<0b10, 0b1, "tbx", "VTriple">;
6798 defm TBX4 : NI_TBX_pat<0b11, 0b1, "tbx", "VQuad">;
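// For illustration, the table is a list of one to four consecutive 16b
// registers, e.g. (illustrative registers):
//   tbl v0.8b,  { v1.16b },         v2.8b
//   tbx v0.16b, { v1.16b, v2.16b }, v3.16b
// tbx differs from tbl in leaving bytes with out-of-range indices unchanged
// in $Rd, hence the "$src = $Rd" constraint above.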
6799
6800 class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
6801                      RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
6802   : NeonI_copy<0b1, 0b0, 0b0011,
6803                (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
6804                asmop # "\t$Rd." # Res # "[$Imm], $Rn",
6805                [(set (ResTy VPR128:$Rd),
6806                  (ResTy (vector_insert
6807                    (ResTy VPR128:$src),
6808                    (OpTy OpGPR:$Rn),
6809                    (OpImm:$Imm))))],
6810                NoItinerary>,
6811     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
6812   bits<4> Imm;
6813   let Constraints = "$src = $Rd";
6814 }
6815
6816 // Insert element (vector, from main)
6817 def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
6818                            neon_uimm4_bare> {
6819   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6820 }
6821 def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
6822                            neon_uimm3_bare> {
6823   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6824 }
6825 def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
6826                            neon_uimm2_bare> {
6827   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6828 }
6829 def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
6830                            neon_uimm1_bare> {
6831   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
6832 }
6833
6834 def : NeonInstAlias<"mov $Rd.b[$Imm], $Rn",
6835                     (INSbw VPR128:$Rd, GPR32:$Rn, neon_uimm4_bare:$Imm), 0>;
6836 def : NeonInstAlias<"mov $Rd.h[$Imm], $Rn",
6837                     (INShw VPR128:$Rd, GPR32:$Rn, neon_uimm3_bare:$Imm), 0>;
6838 def : NeonInstAlias<"mov $Rd.s[$Imm], $Rn",
6839                     (INSsw VPR128:$Rd, GPR32:$Rn, neon_uimm2_bare:$Imm), 0>;
6840 def : NeonInstAlias<"mov $Rd.d[$Imm], $Rn",
6841                     (INSdx VPR128:$Rd, GPR64:$Rn, neon_uimm1_bare:$Imm), 0>;
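// For illustration, inserting a GPR into one lane reads as, e.g.:
//   ins v0.s[2], w1
// or, via the alias above, "mov v0.s[2], w1".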
6842
6843 class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
6844                              RegisterClass OpGPR, ValueType OpTy,
6845                              Operand OpImm, Instruction INS>
6846   : Pat<(ResTy (vector_insert
6847               (ResTy VPR64:$src),
6848               (OpTy OpGPR:$Rn),
6849               (OpImm:$Imm))),
6850         (ResTy (EXTRACT_SUBREG
6851           (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
6852             OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
6853
6854 def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
6855                                           neon_uimm3_bare, INSbw>;
6856 def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
6857                                           neon_uimm2_bare, INShw>;
6858 def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
6859                                           neon_uimm1_bare, INSsw>;
6860 def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
6861                                           neon_uimm0_bare, INSdx>;
6862
6863 class NeonI_INS_element<string asmop, string Res, Operand ResImm>
6864   : NeonI_insert<0b1, 0b1,
6865                  (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn,
6866                  ResImm:$Immd, ResImm:$Immn),
6867                  asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
6868                  [],
6869                  NoItinerary>,
6870     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
6871   let Constraints = "$src = $Rd";
6872   bits<4> Immd;
6873   bits<4> Immn;
6874 }
6875
6876 // Insert element (vector, from element)
6877 def INSELb : NeonI_INS_element<"ins", "b", neon_uimm4_bare> {
6878   let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
6879   let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
6880 }
6881 def INSELh : NeonI_INS_element<"ins", "h", neon_uimm3_bare> {
6882   let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
6883   let Inst{14-11} = {Immn{2}, Immn{1}, Immn{0}, 0b0};
6884   // bit 11 is unspecified, but should be set to zero.
6885 }
6886 def INSELs : NeonI_INS_element<"ins", "s", neon_uimm2_bare> {
6887   let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
6888   let Inst{14-11} = {Immn{1}, Immn{0}, 0b0, 0b0};
6889   // bits 11-12 are unspecified, but should be set to zero.
6890 }
6891 def INSELd : NeonI_INS_element<"ins", "d", neon_uimm1_bare> {
6892   let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
6893   let Inst{14-11} = {Immn{0}, 0b0, 0b0, 0b0};
6894   // bits 11-13 are unspecified, but should be set to zero.
6895 }
6896
6897 def : NeonInstAlias<"mov $Rd.b[$Immd], $Rn.b[$Immn]",
6898                     (INSELb VPR128:$Rd, VPR128:$Rn,
6899                       neon_uimm4_bare:$Immd, neon_uimm4_bare:$Immn), 0>;
6900 def : NeonInstAlias<"mov $Rd.h[$Immd], $Rn.h[$Immn]",
6901                     (INSELh VPR128:$Rd, VPR128:$Rn,
6902                       neon_uimm3_bare:$Immd, neon_uimm3_bare:$Immn), 0>;
6903 def : NeonInstAlias<"mov $Rd.s[$Immd], $Rn.s[$Immn]",
6904                     (INSELs VPR128:$Rd, VPR128:$Rn,
6905                       neon_uimm2_bare:$Immd, neon_uimm2_bare:$Immn), 0>;
6906 def : NeonInstAlias<"mov $Rd.d[$Immd], $Rn.d[$Immn]",
6907                     (INSELd VPR128:$Rd, VPR128:$Rn,
6908                       neon_uimm1_bare:$Immd, neon_uimm1_bare:$Immn), 0>;
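// For illustration, the element-to-element form reads as, e.g.:
//   ins v0.h[3], v1.h[5]
// or, via the alias above, "mov v0.h[3], v1.h[5]".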
6909
6910 multiclass Neon_INS_elt_pattern<ValueType ResTy, ValueType NaTy,
6911                                 ValueType MidTy, Operand StImm, Operand NaImm,
6912                                 Instruction INS> {
6913 def : Pat<(ResTy (vector_insert
6914             (ResTy VPR128:$src),
6915             (MidTy (vector_extract
6916               (ResTy VPR128:$Rn),
6917               (StImm:$Immn))),
6918             (StImm:$Immd))),
6919           (INS (ResTy VPR128:$src), (ResTy VPR128:$Rn),
6920               StImm:$Immd, StImm:$Immn)>;
6921
6922 def : Pat <(ResTy (vector_insert
6923              (ResTy VPR128:$src),
6924              (MidTy (vector_extract
6925                (NaTy VPR64:$Rn),
6926                (NaImm:$Immn))),
6927              (StImm:$Immd))),
6928            (INS (ResTy VPR128:$src),
6929              (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
6930              StImm:$Immd, NaImm:$Immn)>;
6931
6932 def : Pat <(NaTy (vector_insert
6933              (NaTy VPR64:$src),
6934              (MidTy (vector_extract
6935                (ResTy VPR128:$Rn),
6936                (StImm:$Immn))),
6937              (NaImm:$Immd))),
6938            (NaTy (EXTRACT_SUBREG
6939              (ResTy (INS
6940                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6941                (ResTy VPR128:$Rn),
6942                NaImm:$Immd, StImm:$Immn)),
6943              sub_64))>;
6944
6945 def : Pat <(NaTy (vector_insert
6946              (NaTy VPR64:$src),
6947              (MidTy (vector_extract
6948                (NaTy VPR64:$Rn),
6949                (NaImm:$Immn))),
6950              (NaImm:$Immd))),
6951            (NaTy (EXTRACT_SUBREG
6952              (ResTy (INS
6953                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6954                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
6955                NaImm:$Immd, NaImm:$Immn)),
6956              sub_64))>;
6957 }
6958
6959 defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, neon_uimm2_bare,
6960                             neon_uimm1_bare, INSELs>;
6961 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, neon_uimm1_bare,
6962                             neon_uimm0_bare, INSELd>;
6963 defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
6964                             neon_uimm3_bare, INSELb>;
6965 defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
6966                             neon_uimm2_bare, INSELh>;
6967 defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
6968                             neon_uimm1_bare, INSELs>;
6969 defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, neon_uimm1_bare,
6970                             neon_uimm0_bare, INSELd>;
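// Illustrative selection for the Neon_INS_elt_pattern defs above (operand
// names assumed): an element-to-element copy such as
//   (v4i32 (vector_insert V0, (vector_extract V1, 1), 2))
// becomes a single "ins v0.s[2], v1.s[1]"; 64-bit operands are first widened
// to a Q register with SUBREG_TO_REG and narrowed back with EXTRACT_SUBREG.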
6971
6972 multiclass Neon_INS_elt_float_pattern<ValueType ResTy, ValueType NaTy,
6973                                       ValueType MidTy,
6974                                       RegisterClass OpFPR, Operand ResImm,
6975                                       SubRegIndex SubIndex, Instruction INS> {
6976 def : Pat <(ResTy (vector_insert
6977              (ResTy VPR128:$src),
6978              (MidTy OpFPR:$Rn),
6979              (ResImm:$Imm))),
6980            (INS (ResTy VPR128:$src),
6981              (ResTy (SUBREG_TO_REG (i64 0), OpFPR:$Rn, SubIndex)),
6982              ResImm:$Imm,
6983              (i64 0))>;
6984
6985 def : Pat <(NaTy (vector_insert
6986              (NaTy VPR64:$src),
6987              (MidTy OpFPR:$Rn),
6988              (ResImm:$Imm))),
6989            (NaTy (EXTRACT_SUBREG
6990              (ResTy (INS
6991                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6992                (ResTy (SUBREG_TO_REG (i64 0), (MidTy OpFPR:$Rn), SubIndex)),
6993                ResImm:$Imm,
6994                (i64 0))),
6995              sub_64))>;
6996 }
6997
6998 defm : Neon_INS_elt_float_pattern<v4f32, v2f32, f32, FPR32, neon_uimm2_bare,
6999                                   sub_32, INSELs>;
7000 defm : Neon_INS_elt_float_pattern<v2f64, v1f64, f64, FPR64, neon_uimm1_bare,
7001                                   sub_64, INSELd>;
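// Illustrative: inserting an FP scalar such as
//   (v4f32 (vector_insert V0, (f32 s1), 2))
// also maps to INS; the scalar already occupies lane 0 of its V register, so
// the pattern copies lane 0 of the widened source: "ins v0.s[2], v1.s[0]".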
7002
7003 class NeonI_SMOV<string asmop, string Res, bit Q,
7004                  ValueType OpTy, ValueType eleTy,
7005                  Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
7006   : NeonI_copy<Q, 0b0, 0b0101,
7007                (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
7008                asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
7009                [(set (ResTy ResGPR:$Rd),
7010                  (ResTy (sext_inreg
7011                    (ResTy (vector_extract
7012                      (OpTy VPR128:$Rn), (OpImm:$Imm))),
7013                    eleTy)))],
7014                NoItinerary>,
7015     Sched<[WriteFPALU, ReadFPALU]> {
7016   bits<4> Imm;
7017 }
7018
7019 // Signed integer move (main, from element)
7020 def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
7021                         GPR32, i32> {
7022   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7023 }
7024 def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
7025                         GPR32, i32> {
7026   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7027 }
7028 def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
7029                         GPR64, i64> {
7030   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7031 }
7032 def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
7033                         GPR64, i64> {
7034   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7035 }
7036 def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
7037                         GPR64, i64> {
7038   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7039 }
7040
7041 multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
7042                                ValueType eleTy, Operand StImm,  Operand NaImm,
7043                                Instruction SMOVI> {
7044   def : Pat<(i64 (sext_inreg
7045               (i64 (anyext
7046                 (i32 (vector_extract
7047                   (StTy VPR128:$Rn), (StImm:$Imm))))),
7048               eleTy)),
7049             (SMOVI VPR128:$Rn, StImm:$Imm)>;
7050
7051   def : Pat<(i64 (sext
7052               (i32 (vector_extract
7053                 (StTy VPR128:$Rn), (StImm:$Imm))))),
7054             (SMOVI VPR128:$Rn, StImm:$Imm)>;
7055
7056   def : Pat<(i64 (sext_inreg
7057               (i64 (vector_extract
7058                 (NaTy VPR64:$Rn), (NaImm:$Imm))),
7059               eleTy)),
7060             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7061               NaImm:$Imm)>;
7062
7063   def : Pat<(i64 (sext_inreg
7064               (i64 (anyext
7065                 (i32 (vector_extract
7066                   (NaTy VPR64:$Rn), (NaImm:$Imm))))),
7067               eleTy)),
7068             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7069               NaImm:$Imm)>;
7070
7071   def : Pat<(i64 (sext
7072               (i32 (vector_extract
7073                 (NaTy VPR64:$Rn), (NaImm:$Imm))))),
7074             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7075               NaImm:$Imm)>;
7076 }
7077
7078 defm : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
7079                           neon_uimm3_bare, SMOVxb>;
7080 defm : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
7081                           neon_uimm2_bare, SMOVxh>;
7082 defm : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
7083                           neon_uimm1_bare, SMOVxs>;
7084
7085 class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
7086                           ValueType eleTy, Operand StImm,  Operand NaImm,
7087                           Instruction SMOVI>
7088   : Pat<(i32 (sext_inreg
7089           (i32 (vector_extract
7090             (NaTy VPR64:$Rn), (NaImm:$Imm))),
7091           eleTy)),
7092         (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7093           NaImm:$Imm)>;
7094
7095 def : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
7096                          neon_uimm3_bare, SMOVwb>;
7097 def : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
7098                          neon_uimm2_bare, SMOVwh>;
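// The SMOV patterns above fold an explicit sign extension of a lane extract
// into the move itself, e.g. (i64 (sext (i32 (vector_extract (v8i16 V0), 2))))
// selects to "smov x0, v0.h[2]" (illustrative register assignment).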
7099
7100 class NeonI_UMOV<string asmop, string Res, bit Q,
7101                  ValueType OpTy, Operand OpImm,
7102                  RegisterClass ResGPR, ValueType ResTy>
7103   : NeonI_copy<Q, 0b0, 0b0111,
7104                (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
7105                asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
7106                [(set (ResTy ResGPR:$Rd),
7107                   (ResTy (vector_extract
7108                     (OpTy VPR128:$Rn), (OpImm:$Imm))))],
7109                NoItinerary>,
7110     Sched<[WriteFPALU, ReadFPALU]> {
7111   bits<4> Imm;
7112 }
7113
7114 // Unsigned integer move (main, from element)
7115 def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
7116                          GPR32, i32> {
7117   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7118 }
7119 def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
7120                          GPR32, i32> {
7121   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7122 }
7123 def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
7124                          GPR32, i32> {
7125   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7126 }
7127 def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
7128                          GPR64, i64> {
7129   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
7130 }
7131
7132 def : NeonInstAlias<"mov $Rd, $Rn.s[$Imm]",
7133                     (UMOVws GPR32:$Rd, VPR128:$Rn, neon_uimm2_bare:$Imm), 0>;
7134 def : NeonInstAlias<"mov $Rd, $Rn.d[$Imm]",
7135                     (UMOVxd GPR64:$Rd, VPR128:$Rn, neon_uimm1_bare:$Imm), 0>;
7136
7137 class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
7138                          Operand StImm,  Operand NaImm,
7139                          Instruction UMOVI>
7140   : Pat<(ResTy (vector_extract
7141           (NaTy VPR64:$Rn), NaImm:$Imm)),
7142         (UMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7143           NaImm:$Imm)>;
7144
7145 def : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
7146                         neon_uimm3_bare, UMOVwb>;
7147 def : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
7148                         neon_uimm2_bare, UMOVwh>;
7149 def : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
7150                         neon_uimm1_bare, UMOVws>;
7151
7152 def : Pat<(i32 (and
7153             (i32 (vector_extract
7154               (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
7155             255)),
7156           (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
7157
7158 def : Pat<(i32 (and
7159             (i32 (vector_extract
7160               (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
7161             65535)),
7162           (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
7163
7164 def : Pat<(i64 (zext
7165             (i32 (vector_extract
7166               (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
7167           (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
7168
7169 def : Pat<(i32 (and
7170             (i32 (vector_extract
7171               (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
7172             255)),
7173           (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7174             neon_uimm3_bare:$Imm)>;
7175
7176 def : Pat<(i32 (and
7177             (i32 (vector_extract
7178               (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
7179             65535)),
7180           (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7181             neon_uimm2_bare:$Imm)>;
7182
7183 def : Pat<(i64 (zext
7184             (i32 (vector_extract
7185               (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
7186           (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7187             neon_uimm0_bare:$Imm)>;
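// UMOV zero-extends the element into the GPR, so the and-with-mask and zext
// forms above need no additional masking instruction, e.g.
//   (and (i32 (vector_extract (v16i8 V0), 5)), 255)  ==>  umov w0, v0.b[5]
// (illustrative register assignment).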
7188
7189 // Additional copy patterns for scalar types
7190 def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
7191           (UMOVwb (v16i8
7192             (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
7193
7194 def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
7195           (UMOVwh (v8i16
7196             (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
7197
7198 def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
7199           (FMOVws FPR32:$Rn)>;
7200
7201 def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
7202           (FMOVxd FPR64:$Rn)>;
7203
7204 def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
7205           (f64 FPR64:$Rn)>;
7206
7207 def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
7208           (v1i8 (EXTRACT_SUBREG (v16i8
7209             (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
7210             sub_8))>;
7211
7212 def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
7213           (v1i16 (EXTRACT_SUBREG (v8i16
7214             (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
7215             sub_16))>;
7216
7217 def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
7218           (FMOVsw $src)>;
7219
7220 def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),
7221           (FMOVdx $src)>;
7222
7223 def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
7224           (v8i8 (EXTRACT_SUBREG (v16i8
7225             (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
7226             sub_64))>;
7227
7228 def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
7229           (v4i16 (EXTRACT_SUBREG (v8i16
7230             (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
7231             sub_64))>;
7232
7233 def : Pat<(v2i32 (scalar_to_vector GPR32:$Rn)),
7234           (v2i32 (EXTRACT_SUBREG (v4i32
7235             (INSsw (v4i32 (IMPLICIT_DEF)), $Rn, (i64 0))),
7236             sub_64))>;
7237
7238 def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
7239           (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))>;
7240
7241 def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
7242           (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))>;
7243
7244 def : Pat<(v4i32 (scalar_to_vector GPR32:$Rn)),
7245           (INSsw (v4i32 (IMPLICIT_DEF)), $Rn, (i64 0))>;
7246
7247 def : Pat<(v2i64 (scalar_to_vector GPR64:$Rn)),
7248           (INSdx (v2i64 (IMPLICIT_DEF)), $Rn, (i64 0))>;
7249
7250 def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
7251           (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)>;
7252 def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
7253           (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)>;
7254
7255 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
7256           (v1f64 FPR64:$Rn)>;
7257
7258 def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$src))),
7259           (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
7260                          (f64 FPR64:$src), sub_64)>;
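// Summary of the scalar_to_vector patterns above: a GPR scalar is inserted
// into lane 0 of an IMPLICIT_DEF vector with INS (or moved with FMOVsw/FMOVdx
// for the single-element v1i32/v1i64 types), while an FP scalar is already
// lane 0 of its V register and only needs a subregister cast
// (SUBREG_TO_REG / INSERT_SUBREG).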
7261
7262 class NeonI_DUP_Elt<bit Q, string asmop, string rdlane,  string rnlane,
7263                     RegisterOperand ResVPR, Operand OpImm>
7264   : NeonI_copy<Q, 0b0, 0b0000, (outs ResVPR:$Rd),
7265                (ins VPR128:$Rn, OpImm:$Imm),
7266                asmop # "\t$Rd" # rdlane # ", $Rn" # rnlane # "[$Imm]",
7267                [],
7268                NoItinerary>,
7269     Sched<[WriteFPALU, ReadFPALU]> {
7270   bits<4> Imm;
7271 }
7272
7273 def DUPELT16b : NeonI_DUP_Elt<0b1, "dup", ".16b", ".b", VPR128,
7274                               neon_uimm4_bare> {
7275   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7276 }
7277
7278 def DUPELT8h : NeonI_DUP_Elt<0b1, "dup", ".8h", ".h", VPR128,
7279                               neon_uimm3_bare> {
7280   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7281 }
7282
7283 def DUPELT4s : NeonI_DUP_Elt<0b1, "dup", ".4s", ".s", VPR128,
7284                               neon_uimm2_bare> {
7285   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7286 }
7287
7288 def DUPELT2d : NeonI_DUP_Elt<0b1, "dup", ".2d", ".d", VPR128,
7289                               neon_uimm1_bare> {
7290   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
7291 }
7292
7293 def DUPELT8b : NeonI_DUP_Elt<0b0, "dup", ".8b", ".b", VPR64,
7294                               neon_uimm4_bare> {
7295   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7296 }
7297
7298 def DUPELT4h : NeonI_DUP_Elt<0b0, "dup", ".4h", ".h", VPR64,
7299                               neon_uimm3_bare> {
7300   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7301 }
7302
7303 def DUPELT2s : NeonI_DUP_Elt<0b0, "dup", ".2s", ".s", VPR64,
7304                               neon_uimm2_bare> {
7305   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7306 }
7307
7308 multiclass NeonI_DUP_Elt_pattern<Instruction DUPELT, ValueType ResTy,
7309                                        ValueType OpTy,ValueType NaTy,
7310                                        ValueType ExTy, Operand OpLImm,
7311                                        Operand OpNImm> {
7312 def  : Pat<(ResTy (Neon_vduplane (OpTy VPR128:$Rn), OpLImm:$Imm)),
7313         (ResTy (DUPELT (OpTy VPR128:$Rn), OpLImm:$Imm))>;
7314
7315 def : Pat<(ResTy (Neon_vduplane
7316             (NaTy VPR64:$Rn), OpNImm:$Imm)),
7317           (ResTy (DUPELT
7318             (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)), OpNImm:$Imm))>;
7319 }
7320 defm : NeonI_DUP_Elt_pattern<DUPELT16b, v16i8, v16i8, v8i8, v16i8,
7321                              neon_uimm4_bare, neon_uimm3_bare>;
7322 defm : NeonI_DUP_Elt_pattern<DUPELT8b, v8i8, v16i8, v8i8, v16i8,
7323                              neon_uimm4_bare, neon_uimm3_bare>;
7324 defm : NeonI_DUP_Elt_pattern<DUPELT8h, v8i16, v8i16, v4i16, v8i16,
7325                              neon_uimm3_bare, neon_uimm2_bare>;
7326 defm : NeonI_DUP_Elt_pattern<DUPELT4h, v4i16, v8i16, v4i16, v8i16,
7327                              neon_uimm3_bare, neon_uimm2_bare>;
7328 defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4i32, v4i32, v2i32, v4i32,
7329                              neon_uimm2_bare, neon_uimm1_bare>;
7330 defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2i32, v4i32, v2i32, v4i32,
7331                              neon_uimm2_bare, neon_uimm1_bare>;
7332 defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2i64, v2i64, v1i64, v2i64,
7333                              neon_uimm1_bare, neon_uimm0_bare>;
7334 defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4f32, v4f32, v2f32, v4f32,
7335                              neon_uimm2_bare, neon_uimm1_bare>;
7336 defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2f32, v4f32, v2f32, v4f32,
7337                              neon_uimm2_bare, neon_uimm1_bare>;
7338 defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2f64, v2f64, v1f64, v2f64,
7339                              neon_uimm1_bare, neon_uimm0_bare>;
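// Illustrative: a lane splat such as (v4i32 (Neon_vduplane (v4i32 V1), 2))
// selects to "dup v0.4s, v1.s[2]"; 64-bit sources are widened with
// SUBREG_TO_REG first so the same lane numbering can be used.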
7340
7341 def : Pat<(v2f32 (Neon_vdup (f32 FPR32:$Rn))),
7342           (v2f32 (DUPELT2s
7343             (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
7344             (i64 0)))>;
7345 def : Pat<(v4f32 (Neon_vdup (f32 FPR32:$Rn))),
7346           (v4f32 (DUPELT4s
7347             (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
7348             (i64 0)))>;
7349 def : Pat<(v2f64 (Neon_vdup (f64 FPR64:$Rn))),
7350           (v2f64 (DUPELT2d
7351             (SUBREG_TO_REG (i64 0), FPR64:$Rn, sub_64),
7352             (i64 0)))>;
7353
7354 multiclass NeonI_DUP_pattern<Instruction DUPELT, ValueType ResTy,
7355                              ValueType OpTy, RegisterClass OpRC,
7356                              Operand OpNImm, SubRegIndex SubIndex> {
7357 def : Pat<(ResTy (Neon_vduplane (OpTy OpRC:$Rn), OpNImm:$Imm)),
7358           (ResTy (DUPELT
7359             (SUBREG_TO_REG (i64 0), OpRC:$Rn, SubIndex), OpNImm:$Imm))>;
7360 }
7361
7362 defm : NeonI_DUP_pattern<DUPELT4h, v4i16, v1i16, FPR16, neon_uimm2_bare,sub_16>;
7363 defm : NeonI_DUP_pattern<DUPELT4s, v4i32, v1i32, FPR32, neon_uimm2_bare,sub_32>;
7364 defm : NeonI_DUP_pattern<DUPELT8b, v8i8, v1i8, FPR8, neon_uimm3_bare, sub_8>;
7365 defm : NeonI_DUP_pattern<DUPELT8h, v8i16, v1i16, FPR16, neon_uimm3_bare,sub_16>;
7366 defm : NeonI_DUP_pattern<DUPELT16b, v16i8, v1i8, FPR8, neon_uimm4_bare, sub_8>;
7367
7368 class NeonI_DUP<bit Q, string asmop, string rdlane,
7369                 RegisterOperand ResVPR, ValueType ResTy,
7370                 RegisterClass OpGPR, ValueType OpTy>
7371   : NeonI_copy<Q, 0b0, 0b0001, (outs ResVPR:$Rd), (ins OpGPR:$Rn),
7372                asmop # "\t$Rd" # rdlane # ", $Rn",
7373                [(set (ResTy ResVPR:$Rd),
7374                  (ResTy (Neon_vdup (OpTy OpGPR:$Rn))))],
7375                NoItinerary>,
7376     Sched<[WriteFPALU, ReadFPALU]>;
7377
7378 def DUP16b : NeonI_DUP<0b1, "dup", ".16b", VPR128, v16i8, GPR32, i32> {
7379   let Inst{20-16} = 0b00001;
7380   // bits 17-20 are unspecified, but should be set to zero.
7381 }
7382
7383 def DUP8h : NeonI_DUP<0b1, "dup", ".8h", VPR128, v8i16, GPR32, i32> {
7384   let Inst{20-16} = 0b00010;
7385   // bits 18-20 are unspecified, but should be set to zero.
7386 }
7387
7388 def DUP4s : NeonI_DUP<0b1, "dup", ".4s", VPR128, v4i32, GPR32, i32> {
7389   let Inst{20-16} = 0b00100;
7390   // bits 19-20 are unspecified, but should be set to zero.
7391 }
7392
7393 def DUP2d : NeonI_DUP<0b1, "dup", ".2d", VPR128, v2i64, GPR64, i64> {
7394   let Inst{20-16} = 0b01000;
7395   // bit 20 is unspecified, but should be set to zero.
7396 }
7397
7398 def DUP8b : NeonI_DUP<0b0, "dup", ".8b", VPR64, v8i8, GPR32, i32> {
7399   let Inst{20-16} = 0b00001;
7400   // bits 17-20 are unspecified, but should be set to zero.
7401 }
7402
7403 def DUP4h : NeonI_DUP<0b0, "dup", ".4h", VPR64, v4i16, GPR32, i32> {
7404   let Inst{20-16} = 0b00010;
7405   // bits 18-20 are unspecified, but should be set to zero.
7406 }
7407
7408 def DUP2s : NeonI_DUP<0b0, "dup", ".2s", VPR64, v2i32, GPR32, i32> {
7409   let Inst{20-16} = 0b00100;
7410   // bits 19-20 are unspecified, but should be set to zero.
7411 }
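
// The DUP (general) definitions above broadcast a GPR into every lane;
// Inst{20-16} is the imm5 size specifier (00001 = B, 00010 = H, 00100 = S,
// 01000 = D). For example, "dup v0.4s, w1" replicates w1 into all four
// 32-bit lanes (illustrative operands).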
7412
7413 // patterns for CONCAT_VECTORS
7414 multiclass Concat_Vector_Pattern<ValueType ResTy, ValueType OpTy> {
7415 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), undef)),
7416           (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)>;
7417 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))),
7418           (INSELd
7419             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7420             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rm, sub_64)),
7421             (i64 1),
7422             (i64 0))>;
7423 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rn))),
7424           (DUPELT2d
7425             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7426             (i64 0))>;
7427 }
7428
7429 defm : Concat_Vector_Pattern<v16i8, v8i8>;
7430 defm : Concat_Vector_Pattern<v8i16, v4i16>;
7431 defm : Concat_Vector_Pattern<v4i32, v2i32>;
7432 defm : Concat_Vector_Pattern<v2i64, v1i64>;
7433 defm : Concat_Vector_Pattern<v4f32, v2f32>;
7434 defm : Concat_Vector_Pattern<v2f64, v1f64>;
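// Illustrative lowering of the patterns above (register names are
// placeholders): concat(Dn, undef) is only a subregister cast into the low
// half of a Q register; concat(Dn, Dm) widens both operands and emits
// "ins v0.d[1], v1.d[0]"; concat(Dn, Dn) becomes "dup v0.2d, v0.d[0]".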
7435
7436 def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), undef)),
7437           (v2i32 (SUBREG_TO_REG(i64 0), $Rn, sub_32))>;
7438 def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
7439           (EXTRACT_SUBREG 
7440             (v4i32 (INSELs
7441               (v4i32 (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)),
7442               (v4i32 (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
7443               (i64 1),
7444               (i64 0))),
7445             sub_64)>;
7446 def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rn))),
7447           (DUPELT2s (v4i32 (SUBREG_TO_REG(i64 0), $Rn, sub_32)), 0)>;
7448
7449 // patterns for EXTRACT_SUBVECTOR
7450 def : Pat<(v8i8 (extract_subvector (v16i8 VPR128:$Rn), (i64 0))),
7451           (v8i8 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7452 def : Pat<(v4i16 (extract_subvector (v8i16 VPR128:$Rn), (i64 0))),
7453           (v4i16 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7454 def : Pat<(v2i32 (extract_subvector (v4i32 VPR128:$Rn), (i64 0))),
7455           (v2i32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7456 def : Pat<(v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 0))),
7457           (v1i64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7458 def : Pat<(v2f32 (extract_subvector (v4f32 VPR128:$Rn), (i64 0))),
7459           (v2f32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7460 def : Pat<(v1f64 (extract_subvector (v2f64 VPR128:$Rn), (i64 0))),
7461           (v1f64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
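// Extracting the low half of a 128-bit vector is modelled as an
// EXTRACT_SUBREG of sub_64, so no data movement is normally required.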
7462
7463 // The following is the implementation for instruction class (3V Elem)
7464
7465 // Variant 1
7466
7467 class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
7468              string asmop, string ResS, string OpS, string EleOpS,
7469              Operand OpImm, RegisterOperand ResVPR,
7470              RegisterOperand OpVPR, RegisterOperand EleOpVPR>
7471   : NeonI_2VElem<q, u, size, opcode,
7472                  (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
7473                                          EleOpVPR:$Re, OpImm:$Index),
7474                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
7475                  ", $Re." # EleOpS # "[$Index]",
7476                  [],
7477                  NoItinerary>,
7478     Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
7479   bits<3> Index;
7480   bits<5> Re;
7481
7482   let Constraints = "$src = $Rd";
7483 }
7484
7485 multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop> {
7486   // vector register class for element is always 128-bit to cover the max index
7487   def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
7488                      neon_uimm2_bare, VPR64, VPR64, VPR128> {
7489     let Inst{11} = {Index{1}};
7490     let Inst{21} = {Index{0}};
7491     let Inst{20-16} = Re;
7492   }
7493
7494   def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
7495                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
7496     let Inst{11} = {Index{1}};
7497     let Inst{21} = {Index{0}};
7498     let Inst{20-16} = Re;
7499   }
7500
7501   // Index operations on 16-bit (H) elements are restricted to using v0-v15.
7502   def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
7503                      neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
7504     let Inst{11} = {Index{2}};
7505     let Inst{21} = {Index{1}};
7506     let Inst{20} = {Index{0}};
7507     let Inst{19-16} = Re{3-0};
7508   }
7509
7510   def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
7511                      neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
7512     let Inst{11} = {Index{2}};
7513     let Inst{21} = {Index{1}};
7514     let Inst{20} = {Index{0}};
7515     let Inst{19-16} = Re{3-0};
7516   }
7517 }
7518
7519 defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
7520 defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
7521
7522 // Pattern for lane in 128-bit vector
7523 class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
7524                    RegisterOperand ResVPR, RegisterOperand OpVPR,
7525                    RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
7526                    ValueType EleOpTy>
7527   : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
7528           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
7529         (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
7530
7531 // Pattern for lane in 64-bit vector
7532 class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
7533                   RegisterOperand ResVPR, RegisterOperand OpVPR,
7534                   RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
7535                   ValueType EleOpTy>
7536   : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
7537           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
7538         (INST ResVPR:$src, OpVPR:$Rn,
7539           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
7540
7541 multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
7542 {
7543   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
7544                      op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32>;
7545
7546   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
7547                      op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32>;
7548
7549   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
7550                      op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
7551
7552   def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
7553                      op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
7554
7555   // Index can only be half of the max value for lane in 64-bit vector
7556
7557   def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
7558                     op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32>;
7559
7560   def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
7561                     op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
7562 }
7563
7564 defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
7565 defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
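// Illustrative: a multiply-accumulate by a duplicated lane, e.g.
//   Neon_mla(V0, V1, (Neon_vduplane V2, 3)),
// selects to "mla v0.4h, v1.4h, v2.h[3]"; for lanes taken from a 64-bit
// vector the element operand is first widened with SUBREG_TO_REG.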
7566
7567 class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
7568                  string asmop, string ResS, string OpS, string EleOpS,
7569                  Operand OpImm, RegisterOperand ResVPR,
7570                  RegisterOperand OpVPR, RegisterOperand EleOpVPR>
7571   : NeonI_2VElem<q, u, size, opcode,
7572                  (outs ResVPR:$Rd), (ins OpVPR:$Rn,
7573                                          EleOpVPR:$Re, OpImm:$Index),
7574                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
7575                  ", $Re." # EleOpS # "[$Index]",
7576                  [],
7577                  NoItinerary>,
7578     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
7579   bits<3> Index;
7580   bits<5> Re;
7581 }
7582
7583 multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop> {
7584   // vector register class for element is always 128-bit to cover the max index
7585   def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
7586                          neon_uimm2_bare, VPR64, VPR64, VPR128> {
7587     let Inst{11} = {Index{1}};
7588     let Inst{21} = {Index{0}};
7589     let Inst{20-16} = Re;
7590   }
7591
7592   def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
7593                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
7594     let Inst{11} = {Index{1}};
7595     let Inst{21} = {Index{0}};
7596     let Inst{20-16} = Re;
7597   }
7598
7599   // Index operations on 16-bit (H) elements are restricted to using v0-v15.
7600   def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
7601                          neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
7602     let Inst{11} = {Index{2}};
7603     let Inst{21} = {Index{1}};
7604     let Inst{20} = {Index{0}};
7605     let Inst{19-16} = Re{3-0};
7606   }
7607
7608   def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
7609                          neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
7610     let Inst{11} = {Index{2}};
7611     let Inst{21} = {Index{1}};
7612     let Inst{20} = {Index{0}};
7613     let Inst{19-16} = Re{3-0};
7614   }
7615 }
7616
7617 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
7618 defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
7619 defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
7620 defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
7621 }
7622
7623 // Pattern for lane in 128-bit vector
7624 class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
7625                        RegisterOperand OpVPR, RegisterOperand EleOpVPR,
7626                        ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
7627   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
7628           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
7629         (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
7630
7631 // Pattern for lane in 64-bit vector
7632 class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
7633                       RegisterOperand OpVPR, RegisterOperand EleOpVPR,
7634                       ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
7635   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
7636           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
7637         (INST OpVPR:$Rn,
7638           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
7639
7640 multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op> {
7641   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
7642                          op, VPR64, VPR128, v2i32, v2i32, v4i32>;
7643
7644   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
7645                          op, VPR128, VPR128, v4i32, v4i32, v4i32>;
7646
7647   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
7648                          op, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
7649
7650   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
7651                          op, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
7652
7653   // Index can only be half of the max value for lane in 64-bit vector
7654
7655   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
7656                         op, VPR64, VPR64, v2i32, v2i32, v2i32>;
7657
7658   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
7659                         op, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
7660 }
7661
7662 defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
7663 defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
7664 defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
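// Illustrative: "mul v0.4s, v1.4s, v2.s[1]", "sqdmulh v0.8h, v1.8h, v3.h[7]".
// For H elements the element register is limited to v0-v15 (VPR128Lo/VPR64Lo)
// because only four encoding bits (Re{3-0}) are available for it.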
7665
7666 // Variant 2
7667
7668 multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop> {
7669   // vector register class for element is always 128-bit to cover the max index
7670   def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
7671                          neon_uimm2_bare, VPR64, VPR64, VPR128> {
7672     let Inst{11} = {Index{1}};
7673     let Inst{21} = {Index{0}};
7674     let Inst{20-16} = Re;
7675   }
7676
7677   def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
7678                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
7679     let Inst{11} = {Index{1}};
7680     let Inst{21} = {Index{0}};
7681     let Inst{20-16} = Re;
7682   }
7683
7684   // _1d2d doesn't exist!
7685
7686   def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
7687                          neon_uimm1_bare, VPR128, VPR128, VPR128> {
7688     let Inst{11} = {Index{0}};
7689     let Inst{21} = 0b0;
7690     let Inst{20-16} = Re;
7691   }
7692 }
7693
7694 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
7695 defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
7696 defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
7697 }
7698
7699 class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
7700                          RegisterOperand OpVPR, RegisterOperand EleOpVPR,
7701                          ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
7702                          SDPatternOperator coreop>
7703   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
7704           (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
7705         (INST OpVPR:$Rn,
7706           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
7707
7708 multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op> {
7709   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
7710                          op, VPR64, VPR128, v2f32, v2f32, v4f32>;
7711
7712   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
7713                          op, VPR128, VPR128, v4f32, v4f32, v4f32>;
7714
7715   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
7716                          op, VPR128, VPR128, v2f64, v2f64, v2f64>;
7717
7718   // Index can only be half of the max value for lane in 64-bit vector
7719
7720   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
7721                         op, VPR64, VPR64, v2f32, v2f32, v2f32>;
7722
7723   def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
7724                            op, VPR128, VPR64, v2f64, v2f64, v1f64,
7725                            BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
7726 }
7727
7728 defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
7729 defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
7730
7731 def : Pat<(v2f32 (fmul (v2f32 (Neon_vdup (f32 FPR32:$Re))),
7732                        (v2f32 VPR64:$Rn))),
7733           (FMULve_2s4s VPR64:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
7734
7735 def : Pat<(v4f32 (fmul (v4f32 (Neon_vdup (f32 FPR32:$Re))),
7736                        (v4f32 VPR128:$Rn))),
7737           (FMULve_4s4s VPR128:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
7738
7739 def : Pat<(v2f64 (fmul (v2f64 (Neon_vdup (f64 FPR64:$Re))),
7740                        (v2f64 VPR128:$Rn))),
7741           (FMULve_2d2d VPR128:$Rn, (SUBREG_TO_REG (i64 0), $Re, sub_64), 0)>;
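// The three patterns above catch a multiply by a scalar duplicated with
// Neon_vdup: instead of materialising a separate DUP they reuse the
// by-element form with index 0, e.g. "fmul v0.4s, v1.4s, v2.s[0]" where s2 is
// addressed as lane 0 of v2 via SUBREG_TO_REG (illustrative registers).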
7742
7743 // The following patterns use fma.
7744 // -ffp-contract=fast generates fma.
7745
7746 multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop> {
7747   // vector register class for element is always 128-bit to cover the max index
7748   def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
7749                      neon_uimm2_bare, VPR64, VPR64, VPR128> {
7750     let Inst{11} = {Index{1}};
7751     let Inst{21} = {Index{0}};
7752     let Inst{20-16} = Re;
7753   }
7754
7755   def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
7756                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
7757     let Inst{11} = {Index{1}};
7758     let Inst{21} = {Index{0}};
7759     let Inst{20-16} = Re;
7760   }
7761
7762   // _1d2d doesn't exist!
7763
7764   def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
7765                      neon_uimm1_bare, VPR128, VPR128, VPR128> {
7766     let Inst{11} = {Index{0}};
7767     let Inst{21} = 0b0;
7768     let Inst{20-16} = Re;
7769   }
7770 }
7771
7772 defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
7773 defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
7774
7775 // Pattern for lane in 128-bit vector
7776 class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
7777                        RegisterOperand ResVPR, RegisterOperand OpVPR,
7778                        ValueType ResTy, ValueType OpTy,
7779                        SDPatternOperator coreop>
7780   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
7781                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
7782         (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
7783
7784 // Pattern for lane 0
7785 class NI_2VEfma_lane0<Instruction INST, SDPatternOperator op,
7786                       RegisterOperand ResVPR, ValueType ResTy>
7787   : Pat<(ResTy (op (ResTy ResVPR:$Rn),
7788                    (ResTy (Neon_vdup (f32 FPR32:$Re))),
7789                    (ResTy ResVPR:$src))),
7790         (INST ResVPR:$src, ResVPR:$Rn,
7791               (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
7792
7793 // Pattern for lane in 64-bit vector
7794 class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
7795                       RegisterOperand ResVPR, RegisterOperand OpVPR,
7796                       ValueType ResTy, ValueType OpTy,
7797                       SDPatternOperator coreop>
7798   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
7799                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
7800         (INST ResVPR:$src, ResVPR:$Rn,
7801           (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
7802
7803 // Pattern for lane 0 with a 64-bit vector operand (2d2d)
7804 class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
7805                            SDPatternOperator op,
7806                            RegisterOperand ResVPR, RegisterOperand OpVPR,
7807                            ValueType ResTy, ValueType OpTy,
7808                            SDPatternOperator coreop>
7809   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
7810                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
7811         (INST ResVPR:$src, ResVPR:$Rn,
7812           (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
7813
7814
7815 multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op> {
7816   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
7817                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
7818                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
7819
7820   def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_2s4s"),
7821                         op, VPR64, v2f32>;
7822
7823   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
7824                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
7825                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
7826
7827   def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_4s4s"),
7828                         op, VPR128, v4f32>;
7829
7830   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
7831                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
7832                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
7833
7834   // Index can only be half of the max value for lane in 64-bit vector
7835
7836   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
7837                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
7838                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
7839
7840   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
7841                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
7842                              BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
7843 }
7844
7845 defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
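// Illustrative: an fma whose multiplicand is a duplicated lane, e.g.
//   (fma (Neon_vduplane V2, 1), V1, V0),
// selects to "fmla v0.2s, v1.2s, v2.s[1]" with v0 as the accumulator; the
// "swap" classes above match the duplicated operand in the first multiplicand
// position.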
7846
7847 // Pattern for lane 0
7848 class NI_2VEfms_lane0<Instruction INST, SDPatternOperator op,
7849                       RegisterOperand ResVPR, ValueType ResTy>
7850   : Pat<(ResTy (op (ResTy (fneg ResVPR:$Rn)),
7851                    (ResTy (Neon_vdup (f32 FPR32:$Re))),
7852                    (ResTy ResVPR:$src))),
7853         (INST ResVPR:$src, ResVPR:$Rn,
7854               (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
7855
7856 multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
7857 {
7858   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
7859                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
7860                          BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
7861
7862   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
7863                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
7864                          BinOpFrag<(Neon_vduplane
7865                                      (fneg node:$LHS), node:$RHS)>>;
7866
7867   def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_2s4s"),
7868                         op, VPR64, v2f32>;
7869
7870   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
7871                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
7872                          BinOpFrag<(fneg (Neon_vduplane
7873                                      node:$LHS, node:$RHS))>>;
7874
7875   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
7876                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
7877                          BinOpFrag<(Neon_vduplane
7878                                      (fneg node:$LHS), node:$RHS)>>;
7879
7880   def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_4s4s"),
7881                         op, VPR128, v4f32>;
7882
7883   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
7884                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
7885                          BinOpFrag<(fneg (Neon_vduplane
7886                                      node:$LHS, node:$RHS))>>;
7887
7888   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
7889                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
7890                          BinOpFrag<(Neon_vduplane
7891                                      (fneg node:$LHS), node:$RHS)>>;
7892
7893   // Index can only be half of the max value for lane in 64-bit vector
7894
7895   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
7896                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
7897                         BinOpFrag<(fneg (Neon_vduplane
7898                                     node:$LHS, node:$RHS))>>;
7899
7900   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
7901                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
7902                         BinOpFrag<(Neon_vduplane
7903                                     (fneg node:$LHS), node:$RHS)>>;
7904
7905   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
7906                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
7907                         BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
7908
7909   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
7910                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
7911                         BinOpFrag<(Neon_vduplane (fneg node:$LHS), node:$RHS)>>;
7912
7913   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
7914                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
7915                              BinOpFrag<(fneg (Neon_combine_2d
7916                                          node:$LHS, node:$RHS))>>;
7917
7918   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
7919                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
7920                              BinOpFrag<(Neon_combine_2d
7921                                          (fneg node:$LHS), (fneg node:$RHS))>>;
7922 }
7923
7924 defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
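// fmls is matched from fma with a negated multiplicand: the patterns above
// accept the fneg either on the duplicated operand (before or after the
// duplication) or, in the NI_2VEfms_lane0 form, on the other multiplicand,
// since negating either multiplicand gives the same product.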
7925
7926 // Variant 3: Long type
7927 // E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
7928 //      SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
7929
7930 multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop> {
7931   // vector register class for element is always 128-bit to cover the max index
7932   def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
7933                      neon_uimm2_bare, VPR128, VPR64, VPR128> {
7934     let Inst{11} = {Index{1}};
7935     let Inst{21} = {Index{0}};
7936     let Inst{20-16} = Re;
7937   }
7938
7939   def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
7940                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
7941     let Inst{11} = {Index{1}};
7942     let Inst{21} = {Index{0}};
7943     let Inst{20-16} = Re;
7944   }
7945
7946   // Index operations on 16-bit (H) elements are restricted to using v0-v15.
7947   def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
7948                      neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
7949     let Inst{11} = {Index{2}};
7950     let Inst{21} = {Index{1}};
7951     let Inst{20} = {Index{0}};
7952     let Inst{19-16} = Re{3-0};
7953   }
7954
7955   def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
7956                      neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
7957     let Inst{11} = {Index{2}};
7958     let Inst{21} = {Index{1}};
7959     let Inst{20} = {Index{0}};
7960     let Inst{19-16} = Re{3-0};
7961   }
7962 }
7963
7964 defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
7965 defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
7966 defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
7967 defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
7968 defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
7969 defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
7970
7971 multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop> {
7972   // vector register class for element is always 128-bit to cover the max index
7973   def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
7974                          neon_uimm2_bare, VPR128, VPR64, VPR128> {
7975     let Inst{11} = {Index{1}};
7976     let Inst{21} = {Index{0}};
7977     let Inst{20-16} = Re;
7978   }
7979
7980   def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
7981                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
7982     let Inst{11} = {Index{1}};
7983     let Inst{21} = {Index{0}};
7984     let Inst{20-16} = Re;
7985   }
7986
7987   // Index operations on 16-bit (H) elements are restricted to using v0-v15.
7988   def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
7989                          neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
7990     let Inst{11} = {Index{2}};
7991     let Inst{21} = {Index{1}};
7992     let Inst{20} = {Index{0}};
7993     let Inst{19-16} = Re{3-0};
7994   }
7995
7996   def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
7997                          neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
7998     let Inst{11} = {Index{2}};
7999     let Inst{21} = {Index{1}};
8000     let Inst{20} = {Index{0}};
8001     let Inst{19-16} = Re{3-0};
8002   }
8003 }
8004
8005 let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
8006 defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
8007 defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
8008 defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
8009 }
8010
8011 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
8012           (FMOVdd $src)>;
8013
8014 // Pattern for lane in 128-bit vector
8015 class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
8016                      RegisterOperand EleOpVPR, ValueType ResTy,
8017                      ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
8018                      SDPatternOperator hiop>
8019   : Pat<(ResTy (op (ResTy VPR128:$src),
8020           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8021           (HalfOpTy (Neon_vduplane
8022                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
8023         (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
8024
8025 // Pattern for lane in 64-bit vector
8026 class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
8027                     RegisterOperand EleOpVPR, ValueType ResTy,
8028                     ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
8029                     SDPatternOperator hiop>
8030   : Pat<(ResTy (op (ResTy VPR128:$src),
8031           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8032           (HalfOpTy (Neon_vduplane
8033                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
8034         (INST VPR128:$src, VPR128:$Rn,
8035           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
8036
8037 class NI_2VEL2_lane0<Instruction INST, SDPatternOperator op,
8038                      ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
8039                      SDPatternOperator hiop, Instruction DupInst>
8040   : Pat<(ResTy (op (ResTy VPR128:$src),
8041           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8042           (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
8043         (INST VPR128:$src, VPR128:$Rn, (DupInst $Re), 0)>;
8044
8045 multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op> {
8046   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
8047                      op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
8048
8049   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
8050                      op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32>;
8051
8052   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
8053                        op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
8054
8055   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
8056                        op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
8057
8058   def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
8059                        op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
8060
8061   def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
8062                        op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
8063
8064   // Index can only be half of the max value for lane in 64-bit vector
8065
8066   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
8067                     op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
8068
8069   def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
8070                     op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32>;
8071
8072   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
8073                       op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
8074
8075   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
8076                       op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
8077 }
8078
8079 defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
8080 defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
8081 defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
8082 defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
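// Illustrative: "smlal v0.4s, v1.4h, v2.h[2]" widens the 16-bit elements while
// accumulating; the "2" variants (e.g. smlal2) read the high half of the first
// source, which is why the NI_2VEL2_* patterns match through Neon_High8H and
// Neon_High4S.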
8083
8084 // Pattern for lane in 128-bit vector
8085 class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
8086                          RegisterOperand EleOpVPR, ValueType ResTy,
8087                          ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
8088                          SDPatternOperator hiop>
8089   : Pat<(ResTy (op
8090           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8091           (HalfOpTy (Neon_vduplane
8092                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
8093         (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
8094
8095 // Pattern for lane in 64-bit vector
8096 class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
8097                         RegisterOperand EleOpVPR, ValueType ResTy,
8098                         ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
8099                         SDPatternOperator hiop>
8100   : Pat<(ResTy (op
8101           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8102           (HalfOpTy (Neon_vduplane
8103                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
8104         (INST VPR128:$Rn,
8105           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
8106
8107 // Pattern for fixed lane 0
8108 class NI_2VEL2_mul_lane0<Instruction INST, SDPatternOperator op,
8109                          ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
8110                          SDPatternOperator hiop, Instruction DupInst>
8111   : Pat<(ResTy (op
8112           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
8113           (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
8114         (INST VPR128:$Rn, (DupInst $Re), 0)>;
8115
8116 multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op> {
8117   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
8118                          op, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
8119
8120   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
8121                          op, VPR64, VPR128, v2i64, v2i32, v4i32>;
8122
8123   def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
8124                          op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
8125
8126   def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
8127                            op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
8128
8129   def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_4s8h"),
8130                            op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
8131
8132   def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_2d4s"),
8133                            op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
8134
8135   // Index can only be half of the max value for lane in 64-bit vector
8136
8137   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
8138                         op, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
8139
8140   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
8141                         op, VPR64, VPR64, v2i64, v2i32, v2i32>;
8142
8143   def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
8144                           op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
8145
8146   def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
8147                           op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
8148 }
8149
8150 defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
8151 defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
8152 defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
8153
8154 multiclass NI_qdma<SDPatternOperator op> {
8155   def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
8156                     (op node:$Ra,
8157                       (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
8158
8159   def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
8160                     (op node:$Ra,
8161                       (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
8162 }
8163
8164 defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
8165 defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
8166
8167 multiclass NI_2VEL_v3_qdma_pat<string subop, string op> {
8168   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
8169                      !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
8170                      v4i32, v4i16, v8i16>;
8171
8172   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
8173                      !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
8174                      v2i64, v2i32, v4i32>;
8175
8176   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
8177                        !cast<PatFrag>(op # "_4s"), VPR128Lo,
8178                        v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
8179
8180   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
8181                        !cast<PatFrag>(op # "_2d"), VPR128,
8182                        v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
8183
8184   def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
8185                        !cast<PatFrag>(op # "_4s"),
8186                        v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
8187
8188   def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
8189                        !cast<PatFrag>(op # "_2d"),
8190                        v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
8191
8192   // The lane index can only be half its maximum for 64-bit vector operands.
8193
8194   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
8195                     !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
8196                     v4i32, v4i16, v4i16>;
8197
8198   def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
8199                     !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
8200                     v2i64, v2i32, v2i32>;
8201
8202   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
8203                       !cast<PatFrag>(op # "_4s"), VPR64Lo,
8204                       v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
8205
8206   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
8207                       !cast<PatFrag>(op # "_2d"), VPR64,
8208                       v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
8209 }
8210
8211 defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
8212 defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
8213
8214 // End of implementation for instruction class (3V Elem)
8215
8216 class NeonI_REV<string asmop, string Res, bits<2> size, bit Q, bit U,
8217                 bits<5> opcode, RegisterOperand ResVPR, ValueType ResTy,
8218                 SDPatternOperator Neon_Rev>
8219   : NeonI_2VMisc<Q, U, size, opcode,
8220                (outs ResVPR:$Rd), (ins ResVPR:$Rn),
8221                asmop # "\t$Rd." # Res # ", $Rn." # Res,
8222                [(set (ResTy ResVPR:$Rd),
8223                   (ResTy (Neon_Rev (ResTy ResVPR:$Rn))))],
8224                NoItinerary>,
8225     Sched<[WriteFPALU, ReadFPALU]>;
8226
8227 def REV64_16b : NeonI_REV<"rev64", "16b", 0b00, 0b1, 0b0, 0b00000, VPR128,
8228                           v16i8, Neon_rev64>;
8229 def REV64_8h : NeonI_REV<"rev64", "8h", 0b01, 0b1, 0b0, 0b00000, VPR128,
8230                          v8i16, Neon_rev64>;
8231 def REV64_4s : NeonI_REV<"rev64", "4s", 0b10, 0b1, 0b0, 0b00000, VPR128,
8232                          v4i32, Neon_rev64>;
8233 def REV64_8b : NeonI_REV<"rev64", "8b", 0b00, 0b0, 0b0, 0b00000, VPR64,
8234                          v8i8, Neon_rev64>;
8235 def REV64_4h : NeonI_REV<"rev64", "4h", 0b01, 0b0, 0b0, 0b00000, VPR64,
8236                          v4i16, Neon_rev64>;
8237 def REV64_2s : NeonI_REV<"rev64", "2s", 0b10, 0b0, 0b0, 0b00000, VPR64,
8238                          v2i32, Neon_rev64>;
8239
8240 def : Pat<(v4f32 (Neon_rev64 (v4f32 VPR128:$Rn))), (REV64_4s VPR128:$Rn)>;
8241 def : Pat<(v2f32 (Neon_rev64 (v2f32 VPR64:$Rn))), (REV64_2s VPR64:$Rn)>;
8242
8243 def REV32_16b : NeonI_REV<"rev32", "16b", 0b00, 0b1, 0b1, 0b00000, VPR128,
8244                           v16i8, Neon_rev32>;
8245 def REV32_8h : NeonI_REV<"rev32", "8h", 0b01, 0b1, 0b1, 0b00000, VPR128,
8246                           v8i16, Neon_rev32>;
8247 def REV32_8b : NeonI_REV<"rev32", "8b", 0b00, 0b0, 0b1, 0b00000, VPR64,
8248                          v8i8, Neon_rev32>;
8249 def REV32_4h : NeonI_REV<"rev32", "4h", 0b01, 0b0, 0b1, 0b00000, VPR64,
8250                          v4i16, Neon_rev32>;
8251
8252 def REV16_16b : NeonI_REV<"rev16", "16b", 0b00, 0b1, 0b0, 0b00001, VPR128,
8253                           v16i8, Neon_rev16>;
8254 def REV16_8b : NeonI_REV<"rev16", "8b", 0b00, 0b0, 0b0, 0b00001, VPR64,
8255                          v8i8, Neon_rev16>;
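
// Illustrative assembly for the definitions above (register numbers
// arbitrary):
//   rev64 v0.4s, v1.4s    // reverse 32-bit elements within each doubleword
//   rev32 v0.8h, v1.8h    // reverse 16-bit elements within each word
//   rev16 v0.16b, v1.16b  // reverse bytes within each halfword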
8256
8257 multiclass NeonI_PairwiseAdd<string asmop, bit U, bits<5> opcode,
8258                              SDPatternOperator Neon_Padd> {
8259   def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
8260                            (outs VPR128:$Rd), (ins VPR128:$Rn),
8261                            asmop # "\t$Rd.8h, $Rn.16b",
8262                            [(set (v8i16 VPR128:$Rd),
8263                               (v8i16 (Neon_Padd (v16i8 VPR128:$Rn))))],
8264                            NoItinerary>,
8265               Sched<[WriteFPALU, ReadFPALU]>;
8266
8267   def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
8268                           (outs VPR64:$Rd), (ins VPR64:$Rn),
8269                           asmop # "\t$Rd.4h, $Rn.8b",
8270                           [(set (v4i16 VPR64:$Rd),
8271                              (v4i16 (Neon_Padd (v8i8 VPR64:$Rn))))],
8272                           NoItinerary>,
8273              Sched<[WriteFPALU, ReadFPALU]>;
8274
8275   def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8276                            (outs VPR128:$Rd), (ins VPR128:$Rn),
8277                            asmop # "\t$Rd.4s, $Rn.8h",
8278                            [(set (v4i32 VPR128:$Rd),
8279                               (v4i32 (Neon_Padd (v8i16 VPR128:$Rn))))],
8280                            NoItinerary>,
8281              Sched<[WriteFPALU, ReadFPALU]>;
8282
8283   def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8284                           (outs VPR64:$Rd), (ins VPR64:$Rn),
8285                           asmop # "\t$Rd.2s, $Rn.4h",
8286                           [(set (v2i32 VPR64:$Rd),
8287                              (v2i32 (Neon_Padd (v4i16 VPR64:$Rn))))],
8288                           NoItinerary>,
8289              Sched<[WriteFPALU, ReadFPALU]>;
8290
8291   def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
8292                            (outs VPR128:$Rd), (ins VPR128:$Rn),
8293                            asmop # "\t$Rd.2d, $Rn.4s",
8294                            [(set (v2i64 VPR128:$Rd),
8295                               (v2i64 (Neon_Padd (v4i32 VPR128:$Rn))))],
8296                            NoItinerary>,
8297              Sched<[WriteFPALU, ReadFPALU]>;
8298
8299   def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
8300                           (outs VPR64:$Rd), (ins VPR64:$Rn),
8301                           asmop # "\t$Rd.1d, $Rn.2s",
8302                           [(set (v1i64 VPR64:$Rd),
8303                              (v1i64 (Neon_Padd (v2i32 VPR64:$Rn))))],
8304                           NoItinerary>,
8305              Sched<[WriteFPALU, ReadFPALU]>;
8306 }
8307
8308 defm SADDLP : NeonI_PairwiseAdd<"saddlp", 0b0, 0b00010,
8309                                 int_arm_neon_vpaddls>;
8310 defm UADDLP : NeonI_PairwiseAdd<"uaddlp", 0b1, 0b00010,
8311                                 int_arm_neon_vpaddlu>;
8312
8313 def : Pat<(v1i64 (int_aarch64_neon_saddlv (v2i32 VPR64:$Rn))),
8314           (SADDLP2s1d $Rn)>;
8315 def : Pat<(v1i64 (int_aarch64_neon_uaddlv (v2i32 VPR64:$Rn))),
8316           (UADDLP2s1d $Rn)>;
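
// Example (illustrative): saddlp v0.8h, v1.16b pairwise-adds adjacent signed
// bytes into halfwords; uaddlp is the unsigned form.  The two patterns above
// also map the v2i32 saddlv/uaddlv reductions onto the 2s1d variants, since a
// pairwise add of two elements is the whole reduction.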
8317
8318 multiclass NeonI_PairwiseAddAcc<string asmop, bit U, bits<5> opcode,
8319                              SDPatternOperator Neon_Padd> {
8320   let Constraints = "$src = $Rd" in {
8321     def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
8322                              (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8323                              asmop # "\t$Rd.8h, $Rn.16b",
8324                              [(set (v8i16 VPR128:$Rd),
8325                                 (v8i16 (Neon_Padd
8326                                   (v8i16 VPR128:$src), (v16i8 VPR128:$Rn))))],
8327                              NoItinerary>,
8328                 Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8329
8330     def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
8331                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8332                             asmop # "\t$Rd.4h, $Rn.8b",
8333                             [(set (v4i16 VPR64:$Rd),
8334                                (v4i16 (Neon_Padd
8335                                  (v4i16 VPR64:$src), (v8i8 VPR64:$Rn))))],
8336                             NoItinerary>,
8337                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8338
8339     def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8340                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8341                             asmop # "\t$Rd.4s, $Rn.8h",
8342                             [(set (v4i32 VPR128:$Rd),
8343                                (v4i32 (Neon_Padd
8344                                  (v4i32 VPR128:$src), (v8i16 VPR128:$Rn))))],
8345                             NoItinerary>,
8346                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8347
8348     def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8349                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8350                             asmop # "\t$Rd.2s, $Rn.4h",
8351                             [(set (v2i32 VPR64:$Rd),
8352                                (v2i32 (Neon_Padd
8353                                  (v2i32 VPR64:$src), (v4i16 VPR64:$Rn))))],
8354                             NoItinerary>,
8355                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8356
8357     def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
8358                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8359                             asmop # "\t$Rd.2d, $Rn.4s",
8360                             [(set (v2i64 VPR128:$Rd),
8361                                (v2i64 (Neon_Padd
8362                                  (v2i64 VPR128:$src), (v4i32 VPR128:$Rn))))],
8363                             NoItinerary>,
8364                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8365
8366     def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
8367                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8368                             asmop # "\t$Rd.1d, $Rn.2s",
8369                             [(set (v1i64 VPR64:$Rd),
8370                                (v1i64 (Neon_Padd
8371                                  (v1i64 VPR64:$src), (v2i32 VPR64:$Rn))))],
8372                             NoItinerary>,
8373                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8374   }
8375 }
8376
8377 defm SADALP : NeonI_PairwiseAddAcc<"sadalp", 0b0, 0b00110,
8378                                    int_arm_neon_vpadals>;
8379 defm UADALP : NeonI_PairwiseAddAcc<"uadalp", 0b1, 0b00110,
8380                                    int_arm_neon_vpadalu>;
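
// Example (illustrative): sadalp v0.4s, v1.8h pairwise-adds adjacent signed
// halfwords and accumulates into $Rd, hence the tied $src operand and the
// extra ReadFPALU in the scheduling info.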
8381
8382 multiclass NeonI_2VMisc_BHSDsize_1Arg<string asmop, bit U, bits<5> opcode> {
8383   def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
8384                          (outs VPR128:$Rd), (ins VPR128:$Rn),
8385                          asmop # "\t$Rd.16b, $Rn.16b",
8386                          [], NoItinerary>,
8387             Sched<[WriteFPALU, ReadFPALU]>;
8388
8389   def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
8390                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8391                         asmop # "\t$Rd.8h, $Rn.8h",
8392                         [], NoItinerary>,
8393            Sched<[WriteFPALU, ReadFPALU]>;
8394
8395   def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
8396                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8397                         asmop # "\t$Rd.4s, $Rn.4s",
8398                         [], NoItinerary>,
8399            Sched<[WriteFPALU, ReadFPALU]>;
8400
8401   def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
8402                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8403                         asmop # "\t$Rd.2d, $Rn.2d",
8404                         [], NoItinerary>,
8405            Sched<[WriteFPALU, ReadFPALU]>;
8406
8407   def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
8408                          (outs VPR64:$Rd), (ins VPR64:$Rn),
8409                          asmop # "\t$Rd.8b, $Rn.8b",
8410                          [], NoItinerary>,
8411            Sched<[WriteFPALU, ReadFPALU]>;
8412
8413   def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
8414                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8415                         asmop # "\t$Rd.4h, $Rn.4h",
8416                         [], NoItinerary>,
8417            Sched<[WriteFPALU, ReadFPALU]>;
8418
8419   def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
8420                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8421                         asmop # "\t$Rd.2s, $Rn.2s",
8422                         [], NoItinerary>,
8423            Sched<[WriteFPALU, ReadFPALU]>;
8424 }
8425
8426 defm SQABS : NeonI_2VMisc_BHSDsize_1Arg<"sqabs", 0b0, 0b00111>;
8427 defm SQNEG : NeonI_2VMisc_BHSDsize_1Arg<"sqneg", 0b1, 0b00111>;
8428 defm ABS : NeonI_2VMisc_BHSDsize_1Arg<"abs", 0b0, 0b01011>;
8429 defm NEG : NeonI_2VMisc_BHSDsize_1Arg<"neg", 0b1, 0b01011>;
8430
8431 multiclass NeonI_2VMisc_BHSD_1Arg_Pattern<string Prefix,
8432                                           SDPatternOperator Neon_Op> {
8433   def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$Rn))),
8434             (v16i8 (!cast<Instruction>(Prefix # 16b) (v16i8 VPR128:$Rn)))>;
8435
8436   def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$Rn))),
8437             (v8i16 (!cast<Instruction>(Prefix # 8h) (v8i16 VPR128:$Rn)))>;
8438
8439   def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$Rn))),
8440             (v4i32 (!cast<Instruction>(Prefix # 4s) (v4i32 VPR128:$Rn)))>;
8441
8442   def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$Rn))),
8443             (v2i64 (!cast<Instruction>(Prefix # 2d) (v2i64 VPR128:$Rn)))>;
8444
8445   def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$Rn))),
8446             (v8i8 (!cast<Instruction>(Prefix # 8b) (v8i8 VPR64:$Rn)))>;
8447
8448   def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$Rn))),
8449             (v4i16 (!cast<Instruction>(Prefix # 4h) (v4i16 VPR64:$Rn)))>;
8450
8451   def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$Rn))),
8452             (v2i32 (!cast<Instruction>(Prefix # 2s) (v2i32 VPR64:$Rn)))>;
8453 }
8454
8455 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQABS", int_arm_neon_vqabs>;
8456 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQNEG", int_arm_neon_vqneg>;
8457 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"ABS", int_arm_neon_vabs>;
8458
8459 def : Pat<(v16i8 (sub
8460             (v16i8 Neon_AllZero),
8461             (v16i8 VPR128:$Rn))),
8462           (v16i8 (NEG16b (v16i8 VPR128:$Rn)))>;
8463 def : Pat<(v8i8 (sub
8464             (v8i8 Neon_AllZero),
8465             (v8i8 VPR64:$Rn))),
8466           (v8i8 (NEG8b (v8i8 VPR64:$Rn)))>;
8467 def : Pat<(v8i16 (sub
8468             (v8i16 (bitconvert (v16i8 Neon_AllZero))),
8469             (v8i16 VPR128:$Rn))),
8470           (v8i16 (NEG8h (v8i16 VPR128:$Rn)))>;
8471 def : Pat<(v4i16 (sub
8472             (v4i16 (bitconvert (v8i8 Neon_AllZero))),
8473             (v4i16 VPR64:$Rn))),
8474           (v4i16 (NEG4h (v4i16 VPR64:$Rn)))>;
8475 def : Pat<(v4i32 (sub
8476             (v4i32 (bitconvert (v16i8 Neon_AllZero))),
8477             (v4i32 VPR128:$Rn))),
8478           (v4i32 (NEG4s (v4i32 VPR128:$Rn)))>;
8479 def : Pat<(v2i32 (sub
8480             (v2i32 (bitconvert (v8i8 Neon_AllZero))),
8481             (v2i32 VPR64:$Rn))),
8482           (v2i32 (NEG2s (v2i32 VPR64:$Rn)))>;
8483 def : Pat<(v2i64 (sub
8484             (v2i64 (bitconvert (v16i8 Neon_AllZero))),
8485             (v2i64 VPR128:$Rn))),
8486           (v2i64 (NEG2d (v2i64 VPR128:$Rn)))>;
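
// The patterns above select an integer subtract-from-zero, e.g.
//   (sub (v4i32 zero), x)  ==>  neg v0.4s, v1.4s   (illustrative)
// The zero vector is matched via Neon_AllZero, bitconverted for the non-byte
// element types.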
8487
8488 multiclass NeonI_2VMisc_BHSDsize_2Args<string asmop, bit U, bits<5> opcode> {
8489   let Constraints = "$src = $Rd" in {
8490     def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
8491                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8492                            asmop # "\t$Rd.16b, $Rn.16b",
8493                            [], NoItinerary>,
8494               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8495
8496     def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
8497                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8498                           asmop # "\t$Rd.8h, $Rn.8h",
8499                           [], NoItinerary>,
8500              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8501
8502     def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
8503                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8504                           asmop # "\t$Rd.4s, $Rn.4s",
8505                           [], NoItinerary>,
8506              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8507
8508     def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
8509                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8510                           asmop # "\t$Rd.2d, $Rn.2d",
8511                           [], NoItinerary>,
8512              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8513
8514     def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
8515                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8516                           asmop # "\t$Rd.8b, $Rn.8b",
8517                           [], NoItinerary>,
8518              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8519
8520     def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
8521                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8522                           asmop # "\t$Rd.4h, $Rn.4h",
8523                           [], NoItinerary>,
8524              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8525
8526     def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
8527                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
8528                           asmop # "\t$Rd.2s, $Rn.2s",
8529                           [], NoItinerary>,
8530              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8531   }
8532 }
8533
8534 defm SUQADD : NeonI_2VMisc_BHSDsize_2Args<"suqadd", 0b0, 0b00011>;
8535 defm USQADD : NeonI_2VMisc_BHSDsize_2Args<"usqadd", 0b1, 0b00011>;
8536
8537 multiclass NeonI_2VMisc_BHSD_2Args_Pattern<string Prefix,
8538                                            SDPatternOperator Neon_Op> {
8539   def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$src), (v16i8 VPR128:$Rn))),
8540             (v16i8 (!cast<Instruction>(Prefix # 16b)
8541               (v16i8 VPR128:$src), (v16i8 VPR128:$Rn)))>;
8542
8543   def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$src), (v8i16 VPR128:$Rn))),
8544             (v8i16 (!cast<Instruction>(Prefix # 8h)
8545               (v8i16 VPR128:$src), (v8i16 VPR128:$Rn)))>;
8546
8547   def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$src), (v4i32 VPR128:$Rn))),
8548             (v4i32 (!cast<Instruction>(Prefix # 4s)
8549               (v4i32 VPR128:$src), (v4i32 VPR128:$Rn)))>;
8550
8551   def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$src), (v2i64 VPR128:$Rn))),
8552             (v2i64 (!cast<Instruction>(Prefix # 2d)
8553               (v2i64 VPR128:$src), (v2i64 VPR128:$Rn)))>;
8554
8555   def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$src), (v8i8 VPR64:$Rn))),
8556             (v8i8 (!cast<Instruction>(Prefix # 8b)
8557               (v8i8 VPR64:$src), (v8i8 VPR64:$Rn)))>;
8558
8559   def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$src), (v4i16 VPR64:$Rn))),
8560             (v4i16 (!cast<Instruction>(Prefix # 4h)
8561               (v4i16 VPR64:$src), (v4i16 VPR64:$Rn)))>;
8562
8563   def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$src), (v2i32 VPR64:$Rn))),
8564             (v2i32 (!cast<Instruction>(Prefix # 2s)
8565               (v2i32 VPR64:$src), (v2i32 VPR64:$Rn)))>;
8566 }
8567
8568 defm : NeonI_2VMisc_BHSD_2Args_Pattern<"SUQADD", int_aarch64_neon_suqadd>;
8569 defm : NeonI_2VMisc_BHSD_2Args_Pattern<"USQADD", int_aarch64_neon_usqadd>;
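
// Example (illustrative): suqadd v0.4s, v1.4s accumulates $Rn into $Rd with
// saturation, so $Rd is both a source and the destination (the tied $src
// operand in the multiclass above).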
8570
8571 multiclass NeonI_2VMisc_BHSsizes<string asmop, bit U,
8572                           SDPatternOperator Neon_Op> {
8573   def 16b : NeonI_2VMisc<0b1, U, 0b00, 0b00100,
8574                          (outs VPR128:$Rd), (ins VPR128:$Rn),
8575                          asmop # "\t$Rd.16b, $Rn.16b",
8576                          [(set (v16i8 VPR128:$Rd),
8577                             (v16i8 (Neon_Op (v16i8 VPR128:$Rn))))],
8578                          NoItinerary>,
8579             Sched<[WriteFPALU, ReadFPALU]>;
8580
8581   def 8h : NeonI_2VMisc<0b1, U, 0b01, 0b00100,
8582                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8583                         asmop # "\t$Rd.8h, $Rn.8h",
8584                         [(set (v8i16 VPR128:$Rd),
8585                            (v8i16 (Neon_Op (v8i16 VPR128:$Rn))))],
8586                         NoItinerary>,
8587            Sched<[WriteFPALU, ReadFPALU]>;
8588
8589   def 4s : NeonI_2VMisc<0b1, U, 0b10, 0b00100,
8590                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8591                         asmop # "\t$Rd.4s, $Rn.4s",
8592                         [(set (v4i32 VPR128:$Rd),
8593                            (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
8594                         NoItinerary>,
8595            Sched<[WriteFPALU, ReadFPALU]>;
8596
8597   def 8b : NeonI_2VMisc<0b0, U, 0b00, 0b00100,
8598                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8599                         asmop # "\t$Rd.8b, $Rn.8b",
8600                         [(set (v8i8 VPR64:$Rd),
8601                            (v8i8 (Neon_Op (v8i8 VPR64:$Rn))))],
8602                         NoItinerary>,
8603            Sched<[WriteFPALU, ReadFPALU]>;
8604
8605   def 4h : NeonI_2VMisc<0b0, U, 0b01, 0b00100,
8606                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8607                         asmop # "\t$Rd.4h, $Rn.4h",
8608                         [(set (v4i16 VPR64:$Rd),
8609                            (v4i16 (Neon_Op (v4i16 VPR64:$Rn))))],
8610                         NoItinerary>,
8611            Sched<[WriteFPALU, ReadFPALU]>;
8612
8613   def 2s : NeonI_2VMisc<0b0, U, 0b10, 0b00100,
8614                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8615                         asmop # "\t$Rd.2s, $Rn.2s",
8616                         [(set (v2i32 VPR64:$Rd),
8617                            (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
8618                         NoItinerary>,
8619            Sched<[WriteFPALU, ReadFPALU]>;
8620 }
8621
8622 defm CLS : NeonI_2VMisc_BHSsizes<"cls", 0b0, int_arm_neon_vcls>;
8623 defm CLZ : NeonI_2VMisc_BHSsizes<"clz", 0b1, ctlz>;
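
// Example (illustrative): clz v0.4s, v1.4s counts leading zero bits per
// element (the generic ctlz node); cls counts leading sign bits
// (int_arm_neon_vcls).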
8624
8625 multiclass NeonI_2VMisc_Bsize<string asmop, bit U, bits<2> size,
8626                               bits<5> Opcode> {
8627   def 16b : NeonI_2VMisc<0b1, U, size, Opcode,
8628                          (outs VPR128:$Rd), (ins VPR128:$Rn),
8629                          asmop # "\t$Rd.16b, $Rn.16b",
8630                          [], NoItinerary>,
8631             Sched<[WriteFPALU, ReadFPALU]>;
8632
8633   def 8b : NeonI_2VMisc<0b0, U, size, Opcode,
8634                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8635                         asmop # "\t$Rd.8b, $Rn.8b",
8636                         [], NoItinerary>,
8637            Sched<[WriteFPALU, ReadFPALU]>;
8638 }
8639
8640 defm CNT : NeonI_2VMisc_Bsize<"cnt", 0b0, 0b00, 0b00101>;
8641 defm NOT : NeonI_2VMisc_Bsize<"not", 0b1, 0b00, 0b00101>;
8642 defm RBIT : NeonI_2VMisc_Bsize<"rbit", 0b1, 0b01, 0b00101>;
8643
8644 def : NeonInstAlias<"mvn $Rd.16b, $Rn.16b",
8645                     (NOT16b VPR128:$Rd, VPR128:$Rn), 0>;
8646 def : NeonInstAlias<"mvn $Rd.8b, $Rn.8b",
8647                     (NOT8b VPR64:$Rd, VPR64:$Rn), 0>;
8648
8649 def : Pat<(v16i8 (ctpop (v16i8 VPR128:$Rn))),
8650           (v16i8 (CNT16b (v16i8 VPR128:$Rn)))>;
8651 def : Pat<(v8i8 (ctpop (v8i8 VPR64:$Rn))),
8652           (v8i8 (CNT8b (v8i8 VPR64:$Rn)))>;
8653
8654 def : Pat<(v16i8 (xor
8655             (v16i8 VPR128:$Rn),
8656             (v16i8 Neon_AllOne))),
8657           (v16i8 (NOT16b (v16i8 VPR128:$Rn)))>;
8658 def : Pat<(v8i8 (xor
8659             (v8i8 VPR64:$Rn),
8660             (v8i8 Neon_AllOne))),
8661           (v8i8 (NOT8b (v8i8 VPR64:$Rn)))>;
8662 def : Pat<(v8i16 (xor
8663             (v8i16 VPR128:$Rn),
8664             (v8i16 (bitconvert (v16i8 Neon_AllOne))))),
8665           (NOT16b VPR128:$Rn)>;
8666 def : Pat<(v4i16 (xor
8667             (v4i16 VPR64:$Rn),
8668             (v4i16 (bitconvert (v8i8 Neon_AllOne))))),
8669           (NOT8b VPR64:$Rn)>;
8670 def : Pat<(v4i32 (xor
8671             (v4i32 VPR128:$Rn),
8672             (v4i32 (bitconvert (v16i8 Neon_AllOne))))),
8673           (NOT16b VPR128:$Rn)>;
8674 def : Pat<(v2i32 (xor
8675             (v2i32 VPR64:$Rn),
8676             (v2i32 (bitconvert (v8i8 Neon_AllOne))))),
8677           (NOT8b VPR64:$Rn)>;
8678 def : Pat<(v2i64 (xor
8679             (v2i64 VPR128:$Rn),
8680             (v2i64 (bitconvert (v16i8 Neon_AllOne))))),
8681           (NOT16b VPR128:$Rn)>;
8682
8683 def : Pat<(v16i8 (int_aarch64_neon_rbit (v16i8 VPR128:$Rn))),
8684           (v16i8 (RBIT16b (v16i8 VPR128:$Rn)))>;
8685 def : Pat<(v8i8 (int_aarch64_neon_rbit (v8i8 VPR64:$Rn))),
8686           (v8i8 (RBIT8b (v8i8 VPR64:$Rn)))>;
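
// Examples (illustrative): cnt v0.8b, v1.8b is a per-byte popcount (ctpop),
// mvn v0.16b, v1.16b is the alias for NOT (xor with all-ones, as matched
// above), and rbit v0.16b, v1.16b reverses the bits within each byte.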
8687
8688 multiclass NeonI_2VMisc_SDsizes<string asmop, bit U, bits<5> opcode,
8689                                 SDPatternOperator Neon_Op> {
8690   def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
8691                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8692                         asmop # "\t$Rd.4s, $Rn.4s",
8693                         [(set (v4f32 VPR128:$Rd),
8694                            (v4f32 (Neon_Op (v4f32 VPR128:$Rn))))],
8695                         NoItinerary>,
8696            Sched<[WriteFPALU, ReadFPALU]>;
8697
8698   def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
8699                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8700                         asmop # "\t$Rd.2d, $Rn.2d",
8701                         [(set (v2f64 VPR128:$Rd),
8702                            (v2f64 (Neon_Op (v2f64 VPR128:$Rn))))],
8703                         NoItinerary>,
8704            Sched<[WriteFPALU, ReadFPALU]>;
8705
8706   def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
8707                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8708                         asmop # "\t$Rd.2s, $Rn.2s",
8709                         [(set (v2f32 VPR64:$Rd),
8710                            (v2f32 (Neon_Op (v2f32 VPR64:$Rn))))],
8711                         NoItinerary>,
8712            Sched<[WriteFPALU, ReadFPALU]>;
8713 }
8714
8715 defm FABS : NeonI_2VMisc_SDsizes<"fabs", 0b0, 0b01111, fabs>;
8716 defm FNEG : NeonI_2VMisc_SDsizes<"fneg", 0b1, 0b01111, fneg>;
8717
8718 multiclass NeonI_2VMisc_HSD_Narrow<string asmop, bit U, bits<5> opcode> {
8719   def 8h8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
8720                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8721                           asmop # "\t$Rd.8b, $Rn.8h",
8722                           [], NoItinerary>,
8723              Sched<[WriteFPALU, ReadFPALU]>;
8724
8725   def 4s4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
8726                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8727                           asmop # "\t$Rd.4h, $Rn.4s",
8728                           [], NoItinerary>,
8729              Sched<[WriteFPALU, ReadFPALU]>;
8730
8731   def 2d2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
8732                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8733                           asmop # "\t$Rd.2s, $Rn.2d",
8734                           [], NoItinerary>,
8735              Sched<[WriteFPALU, ReadFPALU]>;
8736
8737   let Constraints = "$Rd = $src" in {
8738     def 8h16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
8739                              (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8740                              asmop # "2\t$Rd.16b, $Rn.8h",
8741                              [], NoItinerary>,
8742                 Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8743
8744     def 4s8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
8745                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8746                             asmop # "2\t$Rd.8h, $Rn.4s",
8747                             [], NoItinerary>,
8748                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8749
8750     def 2d4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
8751                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8752                             asmop # "2\t$Rd.4s, $Rn.2d",
8753                             [], NoItinerary>,
8754                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8755   }
8756 }
8757
8758 defm XTN : NeonI_2VMisc_HSD_Narrow<"xtn", 0b0, 0b10010>;
8759 defm SQXTUN : NeonI_2VMisc_HSD_Narrow<"sqxtun", 0b1, 0b10010>;
8760 defm SQXTN : NeonI_2VMisc_HSD_Narrow<"sqxtn", 0b0, 0b10100>;
8761 defm UQXTN : NeonI_2VMisc_HSD_Narrow<"uqxtn", 0b1, 0b10100>;
8762
8763 multiclass NeonI_2VMisc_Narrow_Patterns<string Prefix,
8764                                         SDPatternOperator Neon_Op> {
8765   def : Pat<(v8i8 (Neon_Op (v8i16 VPR128:$Rn))),
8766             (v8i8 (!cast<Instruction>(Prefix # 8h8b) (v8i16 VPR128:$Rn)))>;
8767
8768   def : Pat<(v4i16 (Neon_Op (v4i32 VPR128:$Rn))),
8769             (v4i16 (!cast<Instruction>(Prefix # 4s4h) (v4i32 VPR128:$Rn)))>;
8770
8771   def : Pat<(v2i32 (Neon_Op (v2i64 VPR128:$Rn))),
8772             (v2i32 (!cast<Instruction>(Prefix # 2d2s) (v2i64 VPR128:$Rn)))>;
8773
8774   def : Pat<(v16i8 (concat_vectors
8775               (v8i8 VPR64:$src),
8776               (v8i8 (Neon_Op (v8i16 VPR128:$Rn))))),
8777             (!cast<Instruction>(Prefix # 8h16b)
8778               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
8779               VPR128:$Rn)>;
8780
8781   def : Pat<(v8i16 (concat_vectors
8782               (v4i16 VPR64:$src),
8783               (v4i16 (Neon_Op (v4i32 VPR128:$Rn))))),
8784             (!cast<Instruction>(Prefix # 4s8h)
8785               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
8786               VPR128:$Rn)>;
8787
8788   def : Pat<(v4i32 (concat_vectors
8789               (v2i32 VPR64:$src),
8790               (v2i32 (Neon_Op (v2i64 VPR128:$Rn))))),
8791             (!cast<Instruction>(Prefix # 2d4s)
8792               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
8793               VPR128:$Rn)>;
8794 }
8795
8796 defm : NeonI_2VMisc_Narrow_Patterns<"XTN", trunc>;
8797 defm : NeonI_2VMisc_Narrow_Patterns<"SQXTUN", int_arm_neon_vqmovnsu>;
8798 defm : NeonI_2VMisc_Narrow_Patterns<"SQXTN", int_arm_neon_vqmovns>;
8799 defm : NeonI_2VMisc_Narrow_Patterns<"UQXTN", int_arm_neon_vqmovnu>;
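
// Example (illustrative): xtn v0.8b, v1.8h truncates each halfword to a byte;
// xtn2 v0.16b, v1.8h writes only the high half of $Rd, which is why the "2"
// variants above are matched from a concat_vectors with the existing low half
// passed through SUBREG_TO_REG.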
8800
8801 multiclass NeonI_2VMisc_SHIFT<string asmop, bit U, bits<5> opcode> {
8802   let DecoderMethod = "DecodeSHLLInstruction" in {
8803     def 8b8h : NeonI_2VMisc<0b0, U, 0b00, opcode,
8804                             (outs VPR128:$Rd),
8805                             (ins VPR64:$Rn, uimm_exact8:$Imm),
8806                             asmop # "\t$Rd.8h, $Rn.8b, $Imm",
8807                             [], NoItinerary>,
8808                Sched<[WriteFPALU, ReadFPALU]>;
8809
8810     def 4h4s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8811                             (outs VPR128:$Rd),
8812                             (ins VPR64:$Rn, uimm_exact16:$Imm),
8813                             asmop # "\t$Rd.4s, $Rn.4h, $Imm",
8814                             [], NoItinerary>,
8815                Sched<[WriteFPALU, ReadFPALU]>;
8816
8817     def 2s2d : NeonI_2VMisc<0b0, U, 0b10, opcode,
8818                             (outs VPR128:$Rd),
8819                             (ins VPR64:$Rn, uimm_exact32:$Imm),
8820                             asmop # "\t$Rd.2d, $Rn.2s, $Imm",
8821                             [], NoItinerary>,
8822                Sched<[WriteFPALU, ReadFPALU]>;
8823
8824     def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
8825                             (outs VPR128:$Rd),
8826                             (ins VPR128:$Rn, uimm_exact8:$Imm),
8827                             asmop # "2\t$Rd.8h, $Rn.16b, $Imm",
8828                             [], NoItinerary>,
8829                 Sched<[WriteFPALU, ReadFPALU]>;
8830
8831     def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8832                             (outs VPR128:$Rd),
8833                             (ins VPR128:$Rn, uimm_exact16:$Imm),
8834                             asmop # "2\t$Rd.4s, $Rn.8h, $Imm",
8835                             [], NoItinerary>,
8836                Sched<[WriteFPALU, ReadFPALU]>;
8837
8838     def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
8839                             (outs VPR128:$Rd),
8840                             (ins VPR128:$Rn, uimm_exact32:$Imm),
8841                             asmop # "2\t$Rd.2d, $Rn.4s, $Imm",
8842                             [], NoItinerary>,
8843                Sched<[WriteFPALU, ReadFPALU]>;
8844   }
8845 }
8846
8847 defm SHLL : NeonI_2VMisc_SHIFT<"shll", 0b1, 0b10011>;
8848
8849 class NeonI_SHLL_Patterns<ValueType OpTy, ValueType DesTy,
8850                           SDPatternOperator ExtOp, Operand Neon_Imm,
8851                           string suffix>
8852   : Pat<(DesTy (shl
8853           (DesTy (ExtOp (OpTy VPR64:$Rn))),
8854             (DesTy (Neon_vdup
8855               (i32 Neon_Imm:$Imm))))),
8856         (!cast<Instruction>("SHLL" # suffix) VPR64:$Rn, Neon_Imm:$Imm)>;
8857
8858 class NeonI_SHLL_High_Patterns<ValueType OpTy, ValueType DesTy,
8859                                SDPatternOperator ExtOp, Operand Neon_Imm,
8860                                string suffix, PatFrag GetHigh>
8861   : Pat<(DesTy (shl
8862           (DesTy (ExtOp
8863             (OpTy (GetHigh VPR128:$Rn)))),
8864               (DesTy (Neon_vdup
8865                 (i32 Neon_Imm:$Imm))))),
8866         (!cast<Instruction>("SHLL" # suffix) VPR128:$Rn, Neon_Imm:$Imm)>;
8867
8868 def : NeonI_SHLL_Patterns<v8i8, v8i16, zext, uimm_exact8, "8b8h">;
8869 def : NeonI_SHLL_Patterns<v8i8, v8i16, sext, uimm_exact8, "8b8h">;
8870 def : NeonI_SHLL_Patterns<v4i16, v4i32, zext, uimm_exact16, "4h4s">;
8871 def : NeonI_SHLL_Patterns<v4i16, v4i32, sext, uimm_exact16, "4h4s">;
8872 def : NeonI_SHLL_Patterns<v2i32, v2i64, zext, uimm_exact32, "2s2d">;
8873 def : NeonI_SHLL_Patterns<v2i32, v2i64, sext, uimm_exact32, "2s2d">;
8874 def : NeonI_SHLL_High_Patterns<v8i8, v8i16, zext, uimm_exact8, "16b8h",
8875                                Neon_High16B>;
8876 def : NeonI_SHLL_High_Patterns<v8i8, v8i16, sext, uimm_exact8, "16b8h",
8877                                Neon_High16B>;
8878 def : NeonI_SHLL_High_Patterns<v4i16, v4i32, zext, uimm_exact16, "8h4s",
8879                                Neon_High8H>;
8880 def : NeonI_SHLL_High_Patterns<v4i16, v4i32, sext, uimm_exact16, "8h4s",
8881                                Neon_High8H>;
8882 def : NeonI_SHLL_High_Patterns<v2i32, v2i64, zext, uimm_exact32, "4s2d",
8883                                Neon_High4S>;
8884 def : NeonI_SHLL_High_Patterns<v2i32, v2i64, sext, uimm_exact32, "4s2d",
8885                                Neon_High4S>;
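
// Example (illustrative): shll v0.8h, v1.8b, #8 widens each byte and shifts
// left by the element size; the uimm_exact operands accept only that exact
// amount, matching (shl (zext/sext x), (vdup 8/16/32)) above.  The shll2
// forms (16b8h etc.) read the high half of the source register.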
8886
8887 multiclass NeonI_2VMisc_SD_Narrow<string asmop, bit U, bits<5> opcode> {
8888   def 4s4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
8889                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8890                           asmop # "\t$Rd.4h, $Rn.4s",
8891                           [], NoItinerary>,
8892              Sched<[WriteFPALU, ReadFPALU]>;
8893
8894   def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8895                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8896                           asmop # "\t$Rd.2s, $Rn.2d",
8897                           [], NoItinerary>,
8898              Sched<[WriteFPALU, ReadFPALU]>;
8899
8900   let Constraints = "$src = $Rd" in {
8901     def 4s8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
8902                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8903                             asmop # "2\t$Rd.8h, $Rn.4s",
8904                             [], NoItinerary>,
8905                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8906
8907     def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8908                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8909                             asmop # "2\t$Rd.4s, $Rn.2d",
8910                             [], NoItinerary>,
8911                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
8912   }
8913 }
8914
8915 defm FCVTN : NeonI_2VMisc_SD_Narrow<"fcvtn", 0b0, 0b10110>;
8916
8917 multiclass NeonI_2VMisc_Narrow_Pattern<string prefix,
8918                                        SDPatternOperator f32_to_f16_Op,
8919                                        SDPatternOperator f64_to_f32_Op> {
8920
8921   def : Pat<(v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))),
8922               (!cast<Instruction>(prefix # "4s4h") (v4f32 VPR128:$Rn))>;
8923
8924   def : Pat<(v8i16 (concat_vectors
8925                 (v4i16 VPR64:$src),
8926                 (v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))))),
8927                   (!cast<Instruction>(prefix # "4s8h")
8928                     (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
8929                     (v4f32 VPR128:$Rn))>;
8930
8931   def : Pat<(v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))),
8932             (!cast<Instruction>(prefix # "2d2s") (v2f64 VPR128:$Rn))>;
8933
8934   def : Pat<(v4f32 (concat_vectors
8935               (v2f32 VPR64:$src),
8936               (v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))))),
8937                 (!cast<Instruction>(prefix # "2d4s")
8938                   (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
8939                   (v2f64 VPR128:$Rn))>;
8940 }
8941
8942 defm : NeonI_2VMisc_Narrow_Pattern<"FCVTN", int_arm_neon_vcvtfp2hf, fround>;
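
// Example (illustrative): fcvtn v0.4h, v1.4s narrows f32 to f16
// (int_arm_neon_vcvtfp2hf) and fcvtn v0.2s, v1.2d narrows f64 to f32
// (fround); the fcvtn2 forms append to the high half of $Rd.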
8943
8944 multiclass NeonI_2VMisc_D_Narrow<string asmop, string prefix, bit U,
8945                                  bits<5> opcode> {
8946   def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
8947                           (outs VPR64:$Rd), (ins VPR128:$Rn),
8948                           asmop # "\t$Rd.2s, $Rn.2d",
8949                           [], NoItinerary>,
8950              Sched<[WriteFPALU, ReadFPALU]>;
8951
8952   def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
8953                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8954                           asmop # "2\t$Rd.4s, $Rn.2d",
8955                           [], NoItinerary>,
8956              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
8957     let Constraints = "$src = $Rd";
8958   }
8959
8960   def : Pat<(v2f32 (int_aarch64_neon_vcvtxn (v2f64 VPR128:$Rn))),
8961             (!cast<Instruction>(prefix # "2d2s") VPR128:$Rn)>;
8962
8963   def : Pat<(v4f32 (concat_vectors
8964               (v2f32 VPR64:$src),
8965               (v2f32 (int_aarch64_neon_vcvtxn (v2f64 VPR128:$Rn))))),
8966             (!cast<Instruction>(prefix # "2d4s")
8967                (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
8968                VPR128:$Rn)>;
8969 }
8970
8971 defm FCVTXN : NeonI_2VMisc_D_Narrow<"fcvtxn", "FCVTXN", 0b1, 0b10110>;
8972
8973 def Neon_High4Float : PatFrag<(ops node:$in),
8974                               (extract_subvector (v4f32 node:$in), (iPTR 2))>;
8975
8976 multiclass NeonI_2VMisc_HS_Extend<string asmop, bit U, bits<5> opcode> {
8977   def 4h4s : NeonI_2VMisc<0b0, U, 0b00, opcode,
8978                           (outs VPR128:$Rd), (ins VPR64:$Rn),
8979                           asmop # "\t$Rd.4s, $Rn.4h",
8980                           [], NoItinerary>,
8981              Sched<[WriteFPALU, ReadFPALU]>;
8982
8983   def 2s2d : NeonI_2VMisc<0b0, U, 0b01, opcode,
8984                           (outs VPR128:$Rd), (ins VPR64:$Rn),
8985                           asmop # "\t$Rd.2d, $Rn.2s",
8986                           [], NoItinerary>,
8987              Sched<[WriteFPALU, ReadFPALU]>;
8988
8989   def 8h4s : NeonI_2VMisc<0b1, U, 0b00, opcode,
8990                           (outs VPR128:$Rd), (ins VPR128:$Rn),
8991                           asmop # "2\t$Rd.4s, $Rn.8h",
8992                           [], NoItinerary>,
8993              Sched<[WriteFPALU, ReadFPALU]>;
8994
8995   def 4s2d : NeonI_2VMisc<0b1, U, 0b01, opcode,
8996                           (outs VPR128:$Rd), (ins VPR128:$Rn),
8997                           asmop # "2\t$Rd.2d, $Rn.4s",
8998                           [], NoItinerary>,
8999              Sched<[WriteFPALU, ReadFPALU]>;
9000 }
9001
9002 defm FCVTL : NeonI_2VMisc_HS_Extend<"fcvtl", 0b0, 0b10111>;
9003
9004 multiclass NeonI_2VMisc_Extend_Pattern<string prefix> {
9005   def : Pat<(v4f32 (int_arm_neon_vcvthf2fp (v4i16 VPR64:$Rn))),
9006             (!cast<Instruction>(prefix # "4h4s") VPR64:$Rn)>;
9007
9008   def : Pat<(v4f32 (int_arm_neon_vcvthf2fp
9009               (v4i16 (Neon_High8H
9010                 (v8i16 VPR128:$Rn))))),
9011             (!cast<Instruction>(prefix # "8h4s") VPR128:$Rn)>;
9012
9013   def : Pat<(v2f64 (fextend (v2f32 VPR64:$Rn))),
9014             (!cast<Instruction>(prefix # "2s2d") VPR64:$Rn)>;
9015
9016   def : Pat<(v2f64 (fextend
9017               (v2f32 (Neon_High4Float
9018                 (v4f32 VPR128:$Rn))))),
9019             (!cast<Instruction>(prefix # "4s2d") VPR128:$Rn)>;
9020 }
9021
9022 defm : NeonI_2VMisc_Extend_Pattern<"FCVTL">;
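
// Example (illustrative): fcvtl v0.4s, v1.4h widens f16 to f32 and
// fcvtl v0.2d, v1.2s widens f32 to f64; the fcvtl2 forms read the high half
// of the source (Neon_High8H / Neon_High4Float).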
9023
9024 multiclass NeonI_2VMisc_SD_Conv<string asmop, bit Size, bit U, bits<5> opcode,
9025                                 ValueType ResTy4s, ValueType OpTy4s,
9026                                 ValueType ResTy2d, ValueType OpTy2d,
9027                                 ValueType ResTy2s, ValueType OpTy2s,
9028                                 SDPatternOperator Neon_Op> {
9029
9030   def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
9031                         (outs VPR128:$Rd), (ins VPR128:$Rn),
9032                         asmop # "\t$Rd.4s, $Rn.4s",
9033                         [(set (ResTy4s VPR128:$Rd),
9034                            (ResTy4s (Neon_Op (OpTy4s VPR128:$Rn))))],
9035                         NoItinerary>,
9036            Sched<[WriteFPALU, ReadFPALU]>;
9037
9038   def 2d : NeonI_2VMisc<0b1, U, {Size, 0b1}, opcode,
9039                         (outs VPR128:$Rd), (ins VPR128:$Rn),
9040                         asmop # "\t$Rd.2d, $Rn.2d",
9041                         [(set (ResTy2d VPR128:$Rd),
9042                            (ResTy2d (Neon_Op (OpTy2d VPR128:$Rn))))],
9043                         NoItinerary>,
9044            Sched<[WriteFPALU, ReadFPALU]>;
9045
9046   def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
9047                         (outs VPR64:$Rd), (ins VPR64:$Rn),
9048                         asmop # "\t$Rd.2s, $Rn.2s",
9049                         [(set (ResTy2s VPR64:$Rd),
9050                            (ResTy2s (Neon_Op (OpTy2s VPR64:$Rn))))],
9051                         NoItinerary>,
9052            Sched<[WriteFPALU, ReadFPALU]>;
9053 }
9054
9055 multiclass NeonI_2VMisc_fp_to_int<string asmop, bit Size, bit U,
9056                                   bits<5> opcode, SDPatternOperator Neon_Op> {
9057   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4i32, v4f32, v2i64,
9058                                 v2f64, v2i32, v2f32, Neon_Op>;
9059 }
9060
9061 defm FCVTNS : NeonI_2VMisc_fp_to_int<"fcvtns", 0b0, 0b0, 0b11010,
9062                                      int_arm_neon_vcvtns>;
9063 defm FCVTNU : NeonI_2VMisc_fp_to_int<"fcvtnu", 0b0, 0b1, 0b11010,
9064                                      int_arm_neon_vcvtnu>;
9065 defm FCVTPS : NeonI_2VMisc_fp_to_int<"fcvtps", 0b1, 0b0, 0b11010,
9066                                      int_arm_neon_vcvtps>;
9067 defm FCVTPU : NeonI_2VMisc_fp_to_int<"fcvtpu", 0b1, 0b1, 0b11010,
9068                                      int_arm_neon_vcvtpu>;
9069 defm FCVTMS : NeonI_2VMisc_fp_to_int<"fcvtms", 0b0, 0b0, 0b11011,
9070                                      int_arm_neon_vcvtms>;
9071 defm FCVTMU : NeonI_2VMisc_fp_to_int<"fcvtmu", 0b0, 0b1, 0b11011,
9072                                      int_arm_neon_vcvtmu>;
9073 defm FCVTZS : NeonI_2VMisc_fp_to_int<"fcvtzs", 0b1, 0b0, 0b11011, fp_to_sint>;
9074 defm FCVTZU : NeonI_2VMisc_fp_to_int<"fcvtzu", 0b1, 0b1, 0b11011, fp_to_uint>;
9075 defm FCVTAS : NeonI_2VMisc_fp_to_int<"fcvtas", 0b0, 0b0, 0b11100,
9076                                      int_arm_neon_vcvtas>;
9077 defm FCVTAU : NeonI_2VMisc_fp_to_int<"fcvtau", 0b0, 0b1, 0b11100,
9078                                      int_arm_neon_vcvtau>;
9079
9080 multiclass NeonI_2VMisc_int_to_fp<string asmop, bit Size, bit U,
9081                                   bits<5> opcode, SDPatternOperator Neon_Op> {
9082   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4i32, v2f64,
9083                                 v2i64, v2f32, v2i32, Neon_Op>;
9084 }
9085
9086 defm SCVTF : NeonI_2VMisc_int_to_fp<"scvtf", 0b0, 0b0, 0b11101, sint_to_fp>;
9087 defm UCVTF : NeonI_2VMisc_int_to_fp<"ucvtf", 0b0, 0b1, 0b11101, uint_to_fp>;
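
// Examples (illustrative): fcvtzs v0.4s, v1.4s is fp_to_sint and
// scvtf v0.2d, v1.2d is sint_to_fp; the same NeonI_2VMisc_SD_Conv shapes
// (4s, 2d, 2s) are reused by all of the conversion multiclasses here.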
9088
9089 multiclass NeonI_2VMisc_fp_to_fp<string asmop, bit Size, bit U,
9090                                  bits<5> opcode, SDPatternOperator Neon_Op> {
9091   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4f32, v2f64,
9092                                 v2f64, v2f32, v2f32, Neon_Op>;
9093 }
9094
9095 defm FRINTN : NeonI_2VMisc_fp_to_fp<"frintn", 0b0, 0b0, 0b11000,
9096                                      int_aarch64_neon_frintn>;
9097 defm FRINTA : NeonI_2VMisc_fp_to_fp<"frinta", 0b0, 0b1, 0b11000, frnd>;
9098 defm FRINTP : NeonI_2VMisc_fp_to_fp<"frintp", 0b1, 0b0, 0b11000, fceil>;
9099 defm FRINTM : NeonI_2VMisc_fp_to_fp<"frintm", 0b0, 0b0, 0b11001, ffloor>;
9100 defm FRINTX : NeonI_2VMisc_fp_to_fp<"frintx", 0b0, 0b1, 0b11001, frint>;
9101 defm FRINTZ : NeonI_2VMisc_fp_to_fp<"frintz", 0b1, 0b0, 0b11001, ftrunc>;
9102 defm FRINTI : NeonI_2VMisc_fp_to_fp<"frinti", 0b1, 0b1, 0b11001, fnearbyint>;
9103 defm FRECPE : NeonI_2VMisc_fp_to_fp<"frecpe", 0b1, 0b0, 0b11101,
9104                                     int_arm_neon_vrecpe>;
9105 defm FRSQRTE : NeonI_2VMisc_fp_to_fp<"frsqrte", 0b1, 0b1, 0b11101,
9106                                      int_arm_neon_vrsqrte>;
9107 let SchedRW = [WriteFPSqrt, ReadFPSqrt] in {
9108 defm FSQRT : NeonI_2VMisc_fp_to_fp<"fsqrt", 0b1, 0b1, 0b11111, fsqrt>;
9109 }
9110
9111 multiclass NeonI_2VMisc_S_Conv<string asmop, bit Size, bit U,
9112                                bits<5> opcode, SDPatternOperator Neon_Op> {
9113   def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
9114                         (outs VPR128:$Rd), (ins VPR128:$Rn),
9115                         asmop # "\t$Rd.4s, $Rn.4s",
9116                         [(set (v4i32 VPR128:$Rd),
9117                            (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
9118                         NoItinerary>,
9119            Sched<[WriteFPALU, ReadFPALU]>;
9120
9121   def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
9122                         (outs VPR64:$Rd), (ins VPR64:$Rn),
9123                         asmop # "\t$Rd.2s, $Rn.2s",
9124                         [(set (v2i32 VPR64:$Rd),
9125                            (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
9126                         NoItinerary>,
9127            Sched<[WriteFPALU, ReadFPALU]>;
9128 }
9129
9130 defm URECPE : NeonI_2VMisc_S_Conv<"urecpe", 0b1, 0b0, 0b11100,
9131                                   int_arm_neon_vrecpe>;
9132 defm URSQRTE : NeonI_2VMisc_S_Conv<"ursqrte", 0b1, 0b1, 0b11100,
9133                                    int_arm_neon_vrsqrte>;
9134
9135 // Crypto instruction classes (AES, SHA1, SHA256)
9136 class NeonI_Cryptoaes_2v<bits<2> size, bits<5> opcode,
9137                          string asmop, SDPatternOperator opnode>
9138   : NeonI_Crypto_AES<size, opcode,
9139                      (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
9140                      asmop # "\t$Rd.16b, $Rn.16b",
9141                      [(set (v16i8 VPR128:$Rd),
9142                         (v16i8 (opnode (v16i8 VPR128:$src),
9143                                        (v16i8 VPR128:$Rn))))],
9144                      NoItinerary>,
9145     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
9146   let Constraints = "$src = $Rd";
9147   let Predicates = [HasNEON, HasCrypto];
9148 }
9149
9150 def AESE : NeonI_Cryptoaes_2v<0b00, 0b00100, "aese", int_arm_neon_aese>;
9151 def AESD : NeonI_Cryptoaes_2v<0b00, 0b00101, "aesd", int_arm_neon_aesd>;
9152
9153 class NeonI_Cryptoaes<bits<2> size, bits<5> opcode,
9154                       string asmop, SDPatternOperator opnode>
9155   : NeonI_Crypto_AES<size, opcode,
9156                      (outs VPR128:$Rd), (ins VPR128:$Rn),
9157                      asmop # "\t$Rd.16b, $Rn.16b",
9158                      [(set (v16i8 VPR128:$Rd),
9159                         (v16i8 (opnode (v16i8 VPR128:$Rn))))],
9160                      NoItinerary>,
9161     Sched<[WriteFPALU, ReadFPALU]>;
9162
9163 def AESMC : NeonI_Cryptoaes<0b00, 0b00110, "aesmc", int_arm_neon_aesmc>;
9164 def AESIMC : NeonI_Cryptoaes<0b00, 0b00111, "aesimc", int_arm_neon_aesimc>;
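
// Example (illustrative): aese v0.16b, v1.16b updates v0 in place ($src is
// tied to $Rd above), whereas aesmc/aesimc take a single untied input.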
9165
9166 class NeonI_Cryptosha_vv<bits<2> size, bits<5> opcode,
9167                          string asmop, SDPatternOperator opnode>
9168   : NeonI_Crypto_SHA<size, opcode,
9169                      (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
9170                      asmop # "\t$Rd.4s, $Rn.4s",
9171                      [(set (v4i32 VPR128:$Rd),
9172                         (v4i32 (opnode (v4i32 VPR128:$src),
9173                                        (v4i32 VPR128:$Rn))))],
9174                      NoItinerary>,
9175     Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
9176   let Constraints = "$src = $Rd";
9177   let Predicates = [HasNEON, HasCrypto];
9178 }
9179
9180 def SHA1SU1 : NeonI_Cryptosha_vv<0b00, 0b00001, "sha1su1",
9181                                  int_arm_neon_sha1su1>;
9182 def SHA256SU0 : NeonI_Cryptosha_vv<0b00, 0b00010, "sha256su0",
9183                                    int_arm_neon_sha256su0>;
9184
9185 class NeonI_Cryptosha_ss<bits<2> size, bits<5> opcode,
9186                          string asmop, SDPatternOperator opnode>
9187   : NeonI_Crypto_SHA<size, opcode,
9188                      (outs FPR32:$Rd), (ins FPR32:$Rn),
9189                      asmop # "\t$Rd, $Rn",
9190                      [], NoItinerary>,
9191     Sched<[WriteFPALU, ReadFPALU]> {
9192   let Predicates = [HasNEON, HasCrypto];
9193   let hasSideEffects = 0;
9194 }
9195
9196 def SHA1H : NeonI_Cryptosha_ss<0b00, 0b00000, "sha1h", int_arm_neon_sha1h>;
9197 def : Pat<(i32 (int_arm_neon_sha1h i32:$Rn)),
9198           (COPY_TO_REGCLASS (SHA1H (COPY_TO_REGCLASS i32:$Rn, FPR32)), GPR32)>;
9199
9200
9201 class NeonI_Cryptosha3_vvv<bits<2> size, bits<3> opcode, string asmop,
9202                            SDPatternOperator opnode>
9203   : NeonI_Crypto_3VSHA<size, opcode,
9204                        (outs VPR128:$Rd),
9205                        (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
9206                        asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
9207                        [(set (v4i32 VPR128:$Rd),
9208                           (v4i32 (opnode (v4i32 VPR128:$src),
9209                                          (v4i32 VPR128:$Rn),
9210                                          (v4i32 VPR128:$Rm))))],
9211                        NoItinerary>,
9212     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
9213   let Constraints = "$src = $Rd";
9214   let Predicates = [HasNEON, HasCrypto];
9215 }
9216
9217 def SHA1SU0 : NeonI_Cryptosha3_vvv<0b00, 0b011, "sha1su0",
9218                                    int_arm_neon_sha1su0>;
9219 def SHA256SU1 : NeonI_Cryptosha3_vvv<0b00, 0b110, "sha256su1",
9220                                      int_arm_neon_sha256su1>;
9221
9222 class NeonI_Cryptosha3_qqv<bits<2> size, bits<3> opcode, string asmop,
9223                            SDPatternOperator opnode>
9224   : NeonI_Crypto_3VSHA<size, opcode,
9225                        (outs FPR128:$Rd),
9226                        (ins FPR128:$src, FPR128:$Rn, VPR128:$Rm),
9227                        asmop # "\t$Rd, $Rn, $Rm.4s",
9228                        [(set (v4i32 FPR128:$Rd),
9229                           (v4i32 (opnode (v4i32 FPR128:$src),
9230                                          (v4i32 FPR128:$Rn),
9231                                          (v4i32 VPR128:$Rm))))],
9232                        NoItinerary>,
9233     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
9234   let Constraints = "$src = $Rd";
9235   let Predicates = [HasNEON, HasCrypto];
9236 }
9237
9238 def SHA256H : NeonI_Cryptosha3_qqv<0b00, 0b100, "sha256h",
9239                                    int_arm_neon_sha256h>;
9240 def SHA256H2 : NeonI_Cryptosha3_qqv<0b00, 0b101, "sha256h2",
9241                                     int_arm_neon_sha256h2>;
9242
9243 class NeonI_Cryptosha3_qsv<bits<2> size, bits<3> opcode, string asmop>
9244   : NeonI_Crypto_3VSHA<size, opcode,
9245                        (outs FPR128:$Rd),
9246                        (ins FPR128:$src, FPR32:$Rn, VPR128:$Rm),
9247                        asmop # "\t$Rd, $Rn, $Rm.4s",
9248                        [], NoItinerary>,
9249     Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
9250   let Constraints = "$src = $Rd";
9251   let hasSideEffects = 0;
9252   let Predicates = [HasNEON, HasCrypto];
9253 }
9254
9255 def SHA1C : NeonI_Cryptosha3_qsv<0b00, 0b000, "sha1c">;
9256 def SHA1P : NeonI_Cryptosha3_qsv<0b00, 0b001, "sha1p">;
9257 def SHA1M : NeonI_Cryptosha3_qsv<0b00, 0b010, "sha1m">;
9258
9259 def : Pat<(int_arm_neon_sha1c v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
9260           (SHA1C v4i32:$hash_abcd,
9261                  (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
9262 def : Pat<(int_arm_neon_sha1m v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
9263           (SHA1M v4i32:$hash_abcd,
9264                  (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
9265 def : Pat<(int_arm_neon_sha1p v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
9266           (SHA1P v4i32:$hash_abcd,
9267                  (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
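
// Example (illustrative): sha1c q0, s1, v2.4s.  The hash_e input arrives as
// an i32, so the patterns above first COPY_TO_REGCLASS it into FPR32 to feed
// the scalar $Rn operand; SHA256H/SHA256H2 instead take a full q register
// (sha256h q0, q1, v2.4s).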
9268
9269 // Additional patterns to match shl to USHL.
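// E.g. (shl (v4i32 x), (v4i32 y)) becomes ushl v0.4s, v1.4s, v2.4s
// (illustrative); a variable left shift is the same for signed and unsigned
// data, so the unsigned form is used for plain shl.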
9270 def : Pat<(v8i8 (shl (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
9271           (USHLvvv_8B $Rn, $Rm)>;
9272 def : Pat<(v4i16 (shl (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
9273           (USHLvvv_4H $Rn, $Rm)>;
9274 def : Pat<(v2i32 (shl (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
9275           (USHLvvv_2S $Rn, $Rm)>;
9276 def : Pat<(v1i64 (shl (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
9277           (USHLddd $Rn, $Rm)>;
9278 def : Pat<(v16i8 (shl (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
9279           (USHLvvv_16B $Rn, $Rm)>;
9280 def : Pat<(v8i16 (shl (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
9281           (USHLvvv_8H $Rn, $Rm)>;
9282 def : Pat<(v4i32 (shl (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
9283           (USHLvvv_4S $Rn, $Rm)>;
9284 def : Pat<(v2i64 (shl (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
9285           (USHLvvv_2D $Rn, $Rm)>;
9286
9287 def : Pat<(v1i8 (shl (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
9288           (EXTRACT_SUBREG
9289               (USHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
9290                           (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
9291               sub_8)>;
9292 def : Pat<(v1i16 (shl (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
9293           (EXTRACT_SUBREG
9294               (USHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
9295                           (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
9296               sub_16)>;
9297 def : Pat<(v1i32 (shl (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
9298           (EXTRACT_SUBREG
9299               (USHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
9300                           (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
9301               sub_32)>;
9302
9303 // Additional patterns to match sra, srl.
9304 // For a vector right shift by a vector of amounts, SSHL/USHL are used with
9305 // negative amounts, so the shift-amount vector is negated first.
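// E.g. a v4i32 logical right shift lowers to (illustrative):
//   neg  v2.4s, v2.4s
//   ushl v0.4s, v1.4s, v2.4s
// and an arithmetic right shift uses sshl with the same negated amounts.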
def : Pat<(v8i8 (srl (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
          (USHLvvv_8B $Rn, (NEG8b $Rm))>;
def : Pat<(v4i16 (srl (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
          (USHLvvv_4H $Rn, (NEG4h $Rm))>;
def : Pat<(v2i32 (srl (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
          (USHLvvv_2S $Rn, (NEG2s $Rm))>;
def : Pat<(v1i64 (srl (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
          (USHLddd $Rn, (NEGdd $Rm))>;
def : Pat<(v16i8 (srl (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
          (USHLvvv_16B $Rn, (NEG16b $Rm))>;
def : Pat<(v8i16 (srl (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
          (USHLvvv_8H $Rn, (NEG8h $Rm))>;
def : Pat<(v4i32 (srl (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
          (USHLvvv_4S $Rn, (NEG4s $Rm))>;
def : Pat<(v2i64 (srl (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
          (USHLvvv_2D $Rn, (NEG2d $Rm))>;

def : Pat<(v1i8 (srl (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
          (EXTRACT_SUBREG
              (USHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
                          (NEG8b (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8))),
              sub_8)>;
def : Pat<(v1i16 (srl (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
          (EXTRACT_SUBREG
              (USHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
                          (NEG4h (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16))),
              sub_16)>;
def : Pat<(v1i32 (srl (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
          (EXTRACT_SUBREG
              (USHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
                          (NEG2s (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32))),
              sub_32)>;

def : Pat<(v8i8 (sra (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
          (SSHLvvv_8B $Rn, (NEG8b $Rm))>;
def : Pat<(v4i16 (sra (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
          (SSHLvvv_4H $Rn, (NEG4h $Rm))>;
def : Pat<(v2i32 (sra (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
          (SSHLvvv_2S $Rn, (NEG2s $Rm))>;
def : Pat<(v1i64 (sra (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
          (SSHLddd $Rn, (NEGdd $Rm))>;
def : Pat<(v16i8 (sra (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
          (SSHLvvv_16B $Rn, (NEG16b $Rm))>;
def : Pat<(v8i16 (sra (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
          (SSHLvvv_8H $Rn, (NEG8h $Rm))>;
def : Pat<(v4i32 (sra (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
          (SSHLvvv_4S $Rn, (NEG4s $Rm))>;
def : Pat<(v2i64 (sra (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
          (SSHLvvv_2D $Rn, (NEG2d $Rm))>;

def : Pat<(v1i8 (sra (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
          (EXTRACT_SUBREG
              (SSHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
                          (NEG8b (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8))),
              sub_8)>;
def : Pat<(v1i16 (sra (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
          (EXTRACT_SUBREG
              (SSHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
                          (NEG4h (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16))),
              sub_16)>;
def : Pat<(v1i32 (sra (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
          (EXTRACT_SUBREG
              (SSHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
                          (NEG2s (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32))),
              sub_32)>;

//
// Patterns for handling half-precision values
//

// Convert between f16 value and f32 value
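//
// Hedged sketch only (ACLE __fp16 storage-only type; the names below are
// hypothetical): widening an __fp16 to float and narrowing a float back to
// __fp16 is the operation the two patterns below select, with FCVT doing the
// conversion between the h and s views of the FP register.
//
//   float widen(const __fp16 *p)    { return (float)*p; }   // expected: fcvt sN, hN
//   void narrow(float f, __fp16 *p) { *p = (__fp16)f; }     // expected: fcvt hN, sN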
def : Pat<(f32 (f16_to_f32 (i32 GPR32:$Rn))),
          (FCVTsh (EXTRACT_SUBREG (FMOVsw $Rn), sub_16))>;
def : Pat<(i32 (f32_to_f16 (f32 FPR32:$Rn))),
          (FMOVws (SUBREG_TO_REG (i64 0), (f16 (FCVThs $Rn)), sub_16))>;

// Convert an f16 value arriving as a zero-extended i16 (either an explicit
// mask with 0xffff or an assertzext) to f32.
def : Pat<(f32 (f16_to_f32 (i32 (and (i32 GPR32:$Rn), 65535)))),
          (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;
def : Pat<(f32 (f16_to_f32 (i32 (assertzext GPR32:$Rn)))),
          (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;

def : Pat<(f32 (f16_to_f32 (i32 (assertzext (i32 (
            f32_to_f16 (f32 FPR32:$Rn))))))),
          (f32 FPR32:$Rn)>;

// Patterns for vector extract of half-precision FP value in i16 storage type
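//
// Illustrative sketch only (hypothetical names; a half-precision vector kept
// in 16-bit integer lanes): extracting one lane and widening it to float is
// the operation these patterns match, and they select it to a lane DUP into
// an h register followed by FCVT instead of a round trip through a GPR.
//
//   typedef unsigned short u16x4 __attribute__((vector_size(8)));
//
//   float lane1_to_float(u16x4 v) {
//     union { unsigned short u; __fp16 h; } b = { .u = v[1] };
//     return (float)b.h;   // intended: dup h0, v0.h[1]; fcvt s0, h0
//   }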
def : Pat<(f32 (f16_to_f32 (i32 (and (i32 (vector_extract
            (v4i16 VPR64:$Rn), neon_uimm2_bare:$Imm)), 65535)))),
          (FCVTsh (f16 (DUPhv_H
            (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
            neon_uimm2_bare:$Imm)))>;

def : Pat<(f32 (f16_to_f32 (i32 (and (i32 (vector_extract
            (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)), 65535)))),
          (FCVTsh (f16 (DUPhv_H (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)))>;

// Patterns for vector insert of half-precision FP value 0 in i16 storage type
def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
            (i32 (assertsext (i32 (fp_to_sint (f32 (f16_to_f32 (i32 0))))))),
            (neon_uimm3_bare:$Imm))),
          (v8i16 (INSELh (v8i16 VPR128:$Rn),
            (v8i16 (SUBREG_TO_REG (i64 0),
              (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
              sub_16)),
            neon_uimm3_bare:$Imm, 0))>;

def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
            (i32 (assertsext (i32 (fp_to_sint (f32 (f16_to_f32 (i32 0))))))),
            (neon_uimm2_bare:$Imm))),
          (v4i16 (EXTRACT_SUBREG
            (v8i16 (INSELh
              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
              (v8i16 (SUBREG_TO_REG (i64 0),
                (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
                sub_16)),
              neon_uimm2_bare:$Imm, 0)),
            sub_64))>;

// Patterns for vector insert of half-precision FP value in i16 storage type
def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
            (i32 (assertsext (i32 (fp_to_sint
              (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
            (neon_uimm3_bare:$Imm))),
          (v8i16 (INSELh (v8i16 VPR128:$Rn),
            (v8i16 (SUBREG_TO_REG (i64 0),
              (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
              sub_16)),
            neon_uimm3_bare:$Imm, 0))>;

def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
            (i32 (assertsext (i32 (fp_to_sint
              (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
            (neon_uimm2_bare:$Imm))),
          (v4i16 (EXTRACT_SUBREG
            (v8i16 (INSELh
              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
              (v8i16 (SUBREG_TO_REG (i64 0),
                (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
                sub_16)),
              neon_uimm2_bare:$Imm, 0)),
            sub_64))>;

def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
            (i32 (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
            (neon_uimm3_bare:$Imm1))),
          (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
            neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;
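//
// Hedged sketch of the source-level operation behind the generic i16 lane
// copy above (hypothetical names, C vector extensions): copying one 16-bit
// lane of a source vector into a lane of a destination vector, which is
// selected to a single INS (element).
//
//   typedef unsigned short u16x8 __attribute__((vector_size(16)));
//
//   u16x8 copy_lane(u16x8 dst, u16x8 src) {
//     dst[1] = src[5];   // intended: ins v0.h[1], v1.h[5]
//     return dst;
//   }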

// Patterns for vector copy of half-precision FP value in i16 storage type
def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
            (i32 (assertsext (i32 (fp_to_sint (f32 (f16_to_f32 (i32 (and (i32
              (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
              65535)))))))),
            (neon_uimm3_bare:$Imm1))),
          (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
            neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;

def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
            (i32 (assertsext (i32 (fp_to_sint (f32 (f16_to_f32 (i32 (and (i32
              (vector_extract (v4i16 VPR64:$src), neon_uimm3_bare:$Imm2)),
              65535)))))))),
            (neon_uimm3_bare:$Imm1))),
          (v4i16 (EXTRACT_SUBREG
            (v8i16 (INSELh
              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
              neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2)),
            sub_64))>;
