1 //===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the AArch64 NEON instruction set.
11 //
12 //===----------------------------------------------------------------------===//
13
14 //===----------------------------------------------------------------------===//
15 // NEON-specific DAG Nodes.
16 //===----------------------------------------------------------------------===//
17 def Neon_bsl       : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3,
18                       [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
19                       SDTCisSameAs<0, 3>]>>;
20
21 // (outs Result), (ins Imm, OpCmode)
22 def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
23
24 def Neon_movi     : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
25
26 def Neon_mvni     : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
27
28 // (outs Result), (ins Imm)
29 def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
30                         [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
31
32 // (outs Result), (ins LHS, RHS, CondCode)
33 def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
34                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
35
36 // (outs Result), (ins LHS, 0/0.0 constant, CondCode)
37 def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
38                  [SDTCisVec<0>,  SDTCisVec<1>]>>;
39
40 // (outs Result), (ins LHS, RHS)
41 def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
42                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
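// These comparison nodes produce a per-lane mask: all ones in lanes where the
// condition holds and all zeros elsewhere.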
43
44 def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
45                                      SDTCisVT<2, i32>]>;
46 def Neon_sqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
47 def Neon_uqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
48
49 def SDTPERMUTE : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
50                                SDTCisSameAs<0, 2>]>;
51 def Neon_uzp1    : SDNode<"AArch64ISD::NEON_UZP1", SDTPERMUTE>;
52 def Neon_uzp2    : SDNode<"AArch64ISD::NEON_UZP2", SDTPERMUTE>;
53 def Neon_zip1    : SDNode<"AArch64ISD::NEON_ZIP1", SDTPERMUTE>;
54 def Neon_zip2    : SDNode<"AArch64ISD::NEON_ZIP2", SDTPERMUTE>;
55 def Neon_trn1    : SDNode<"AArch64ISD::NEON_TRN1", SDTPERMUTE>;
56 def Neon_trn2    : SDNode<"AArch64ISD::NEON_TRN2", SDTPERMUTE>;
57
58 def SDTVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
59 def Neon_rev64    : SDNode<"AArch64ISD::NEON_REV64", SDTVSHUF>;
60 def Neon_rev32    : SDNode<"AArch64ISD::NEON_REV32", SDTVSHUF>;
61 def Neon_rev16    : SDNode<"AArch64ISD::NEON_REV16", SDTVSHUF>;
62 def Neon_vdup : SDNode<"AArch64ISD::NEON_VDUP", SDTypeProfile<1, 1,
63                        [SDTCisVec<0>]>>;
64 def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
65                            [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
66 def Neon_vextract : SDNode<"AArch64ISD::NEON_VEXTRACT", SDTypeProfile<1, 3,
67                            [SDTCisVec<0>,  SDTCisSameAs<0, 1>,
68                            SDTCisSameAs<0, 2>, SDTCisVT<3, i64>]>>;
69
70 //===----------------------------------------------------------------------===//
71 // Multiclasses
72 //===----------------------------------------------------------------------===//
73
74 multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size,  bits<5> opcode,
75                                 string asmop, SDPatternOperator opnode8B,
76                                 SDPatternOperator opnode16B,
77                                 bit Commutable = 0> {
78   let isCommutable = Commutable in {
79     def _8B :  NeonI_3VSame<0b0, u, size, opcode,
80                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
81                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
82                [(set (v8i8 VPR64:$Rd),
83                   (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
84                NoItinerary>;
85
86     def _16B : NeonI_3VSame<0b1, u, size, opcode,
87                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
88                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
89                [(set (v16i8 VPR128:$Rd),
90                   (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
91                NoItinerary>;
92   }
93
94 }
95
96 multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
97                                   string asmop, SDPatternOperator opnode,
98                                   bit Commutable = 0> {
99   let isCommutable = Commutable in {
100     def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
101               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
102               asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
103               [(set (v4i16 VPR64:$Rd),
104                  (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
105               NoItinerary>;
106
107     def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
108               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
109               asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
110               [(set (v8i16 VPR128:$Rd),
111                  (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
112               NoItinerary>;
113
114     def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
115               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
116               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
117               [(set (v2i32 VPR64:$Rd),
118                  (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
119               NoItinerary>;
120
121     def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
122               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
123               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
124               [(set (v4i32 VPR128:$Rd),
125                  (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
126               NoItinerary>;
127   }
128 }
129 multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
130                                   string asmop, SDPatternOperator opnode,
131                                   bit Commutable = 0>
132    : NeonI_3VSame_HS_sizes<u, opcode,  asmop, opnode, Commutable> {
133   let isCommutable = Commutable in {
134     def _8B :  NeonI_3VSame<0b0, u, 0b00, opcode,
135                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
136                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
137                [(set (v8i8 VPR64:$Rd),
138                   (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
139                NoItinerary>;
140
141     def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
142                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
143                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
144                [(set (v16i8 VPR128:$Rd),
145                   (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
146                NoItinerary>;
147   }
148 }
149
150 multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
151                                    string asmop, SDPatternOperator opnode,
152                                    bit Commutable = 0>
153    : NeonI_3VSame_BHS_sizes<u, opcode,  asmop, opnode, Commutable> {
154   let isCommutable = Commutable in {
155     def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
156               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
157               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
158               [(set (v2i64 VPR128:$Rd),
159                  (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
160               NoItinerary>;
161   }
162 }
163
164 // Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
165 // but Result types can be integer or floating point types.
166 multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
167                                  string asmop, SDPatternOperator opnode2S,
168                                  SDPatternOperator opnode4S,
169                                  SDPatternOperator opnode2D,
170                                  ValueType ResTy2S, ValueType ResTy4S,
171                                  ValueType ResTy2D, bit Commutable = 0> {
172   let isCommutable = Commutable in {
173     def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
174               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
175               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
176               [(set (ResTy2S VPR64:$Rd),
177                  (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
178               NoItinerary>;
179
180     def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
181               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
182               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
183               [(set (ResTy4S VPR128:$Rd),
184                  (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
185               NoItinerary>;
186
187     def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
188               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
189               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
190               [(set (ResTy2D VPR128:$Rd),
191                  (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
192                NoItinerary>;
193   }
194 }
195
196 //===----------------------------------------------------------------------===//
197 // Instruction Definitions
198 //===----------------------------------------------------------------------===//
199
200 // Vector Arithmetic Instructions
201
202 // Vector Add (Integer and Floating-Point)
203
204 defm ADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
205 defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd,
206                                      v2f32, v4f32, v2f64, 1>;
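// Each NeonI_3VSame_* size multiclass above expands into one instruction per
// arrangement, named by suffix: the BHSD defm gives ADDvvv_8B, ADDvvv_16B,
// ADDvvv_4H, ADDvvv_8H, ADDvvv_2S, ADDvvv_4S and ADDvvv_2D, while the SD defm
// gives FADDvvv_2S, FADDvvv_4S and FADDvvv_2D.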
207
208 // Vector Sub (Integer and Floating-Point)
209
210 defm SUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
211 defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub,
212                                      v2f32, v4f32, v2f64, 0>;
213
214 // Vector Multiply (Integer and Floating-Point)
215
216 defm MULvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
217 defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul,
218                                      v2f32, v4f32, v2f64, 1>;
219
220 // Vector Multiply (Polynomial)
221
222 defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
223                                     int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
224
225 // Vector Multiply-accumulate and Multiply-subtract (Integer)
226
227 // class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and
228 // a constraint tying two operands ($src = $Rd).
229 class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
230   RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size, 
231   bits<5> opcode, SDPatternOperator opnode>
232   : NeonI_3VSame<q, u, size, opcode,
233     (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
234     asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
235     [(set (OpTy VPRC:$Rd),
236        (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
237     NoItinerary> {
238   let Constraints = "$src = $Rd";
239 }
240
241 def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
242                        (add node:$Ra, (mul node:$Rn, node:$Rm))>;
243
244 def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
245                        (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
246
247
248 def MLAvvv_8B:  NeonI_3VSame_Constraint_impl<"mla", ".8b",  VPR64,  v8i8,
249                                              0b0, 0b0, 0b00, 0b10010, Neon_mla>;
250 def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
251                                              0b1, 0b0, 0b00, 0b10010, Neon_mla>;
252 def MLAvvv_4H:  NeonI_3VSame_Constraint_impl<"mla", ".4h",  VPR64,  v4i16,
253                                              0b0, 0b0, 0b01, 0b10010, Neon_mla>;
254 def MLAvvv_8H:  NeonI_3VSame_Constraint_impl<"mla", ".8h",  VPR128, v8i16,
255                                              0b1, 0b0, 0b01, 0b10010, Neon_mla>;
256 def MLAvvv_2S:  NeonI_3VSame_Constraint_impl<"mla", ".2s",  VPR64,  v2i32,
257                                              0b0, 0b0, 0b10, 0b10010, Neon_mla>;
258 def MLAvvv_4S:  NeonI_3VSame_Constraint_impl<"mla", ".4s",  VPR128, v4i32,
259                                              0b1, 0b0, 0b10, 0b10010, Neon_mla>;
260
261 def MLSvvv_8B:  NeonI_3VSame_Constraint_impl<"mls", ".8b",  VPR64,  v8i8,
262                                              0b0, 0b1, 0b00, 0b10010, Neon_mls>;
263 def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
264                                              0b1, 0b1, 0b00, 0b10010, Neon_mls>;
265 def MLSvvv_4H:  NeonI_3VSame_Constraint_impl<"mls", ".4h",  VPR64,  v4i16,
266                                              0b0, 0b1, 0b01, 0b10010, Neon_mls>;
267 def MLSvvv_8H:  NeonI_3VSame_Constraint_impl<"mls", ".8h",  VPR128, v8i16,
268                                              0b1, 0b1, 0b01, 0b10010, Neon_mls>;
269 def MLSvvv_2S:  NeonI_3VSame_Constraint_impl<"mls", ".2s",  VPR64,  v2i32,
270                                              0b0, 0b1, 0b10, 0b10010, Neon_mls>;
271 def MLSvvv_4S:  NeonI_3VSame_Constraint_impl<"mls", ".4s",  VPR128, v4i32,
272                                              0b1, 0b1, 0b10, 0b10010, Neon_mls>;
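// Because of the "$src = $Rd" tie, the accumulator is read from and written
// back to the same register: e.g. "mla v0.4s, v1.4s, v2.4s" computes
// v0 += v1 * v2 per lane, selected from the (add $Ra, (mul $Rn, $Rm)) fragment.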
273
274 // Vector Multiply-accumulate and Multiply-subtract (Floating Point)
275
276 def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
277                         (fadd node:$Ra, (fmul node:$Rn, node:$Rm))>;
278
279 def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
280                         (fsub node:$Ra, (fmul node:$Rn, node:$Rm))>;
281
282 let Predicates = [HasNEON, UseFusedMAC] in {
283 def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s",  VPR64,  v2f32,
284                                              0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
285 def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s",  VPR128, v4f32,
286                                              0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
287 def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d",  VPR128, v2f64,
288                                              0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
289
290 def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s",  VPR64,  v2f32,
291                                               0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
292 def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s",  VPR128, v4f32,
293                                              0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
294 def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d",  VPR128, v2f64,
295                                              0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
296 }
297
298 // We're also allowed to match the fma instruction regardless of compile
299 // options.
300 def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
301           (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
302 def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
303           (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
304 def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
305           (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
306
307 def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
308           (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
309 def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
310           (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
311 def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
312           (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
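// Since these patterns sit outside the UseFusedMAC block, an ISD::FMA node
// (e.g. from @llvm.fma) always selects FMLA/FMLS, whereas a separate fmul
// followed by fadd/fsub is only fused when UseFusedMAC is enabled.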
313
314 // Vector Divide (Floating-Point)
315
316 defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv,
317                                      v2f32, v4f32, v2f64, 0>;
318
319 // Vector Bitwise Operations
320
321 // Vector Bitwise AND
322
323 defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
324
325 // Vector Bitwise Exclusive OR
326
327 defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
328
329 // Vector Bitwise OR
330
331 defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
332
333 // ORR disassembled as MOV if Vn==Vm
334
335 // Vector Move - register
336 // Alias for ORR if Vn=Vm.
337 // FIXME: This is actually the preferred syntax but TableGen can't deal with
338 // custom printing of aliases.
339 def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
340                     (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
341 def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
342                     (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
343
344 // The MOVI instruction takes two immediate operands.  The first is the
345 // immediate encoding, while the second is the cmode.  A cmode of 14, or
346 // 0b1110, produces a MOVI operation, rather than an MVNI, ORR, or BIC.
347 def Neon_AllZero : PatFrag<(ops), (Neon_movi (i32 0), (i32 14))>;
348 def Neon_AllOne : PatFrag<(ops), (Neon_movi (i32 255), (i32 14))>;
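// With cmode 14 the 8-bit immediate is replicated into every byte lane, so
// (Neon_movi 255, 14) materializes an all-ones vector; the Neon_not*
// fragments below use it to express bitwise NOT as an XOR with all ones.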
349
350 def Neon_not8B  : PatFrag<(ops node:$in),
351                           (xor node:$in, (bitconvert (v8i8 Neon_AllOne)))>;
352 def Neon_not16B : PatFrag<(ops node:$in),
353                           (xor node:$in, (bitconvert (v16i8 Neon_AllOne)))>;
354
355 def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
356                          (or node:$Rn, (Neon_not8B node:$Rm))>;
357
358 def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
359                           (or node:$Rn, (Neon_not16B node:$Rm))>;
360
361 def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
362                          (and node:$Rn, (Neon_not8B node:$Rm))>;
363
364 def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
365                           (and node:$Rn, (Neon_not16B node:$Rm))>;
366
367
368 // Vector Bitwise OR NOT - register
369
370 defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
371                                    Neon_orn8B, Neon_orn16B, 0>;
372
373 // Vector Bitwise Bit Clear (AND NOT) - register
374
375 defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
376                                    Neon_bic8B, Neon_bic16B, 0>;
377
378 multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
379                                    SDPatternOperator opnode16B,
380                                    Instruction INST8B,
381                                    Instruction INST16B> {
382   def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
383             (INST8B VPR64:$Rn, VPR64:$Rm)>;
384   def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
385             (INST8B VPR64:$Rn, VPR64:$Rm)>;
386   def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
387             (INST8B VPR64:$Rn, VPR64:$Rm)>;
388   def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
389             (INST16B VPR128:$Rn, VPR128:$Rm)>;
390   def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
391             (INST16B VPR128:$Rn, VPR128:$Rm)>;
392   def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
393             (INST16B VPR128:$Rn, VPR128:$Rm)>;
394 }
395
396 // Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
397 defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
398 defm : Neon_bitwise2V_patterns<or,  or,  ORRvvv_8B, ORRvvv_16B>;
399 defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
400 defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
401 defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
402
403 //   Vector Bitwise Select
404 def BSLvvv_8B  : NeonI_3VSame_Constraint_impl<"bsl", ".8b",  VPR64, v8i8,
405                                               0b0, 0b1, 0b01, 0b00011, Neon_bsl>;
406
407 def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
408                                               0b1, 0b1, 0b01, 0b00011, Neon_bsl>;
409
410 multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
411                                    Instruction INST8B,
412                                    Instruction INST16B> {
413   // Disassociate type from instruction definition
414   def : Pat<(v2i32 (opnode VPR64:$src,VPR64:$Rn, VPR64:$Rm)),
415             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
416   def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
417             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
418   def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
419             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
420   def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
421             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
422   def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
423             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
424   def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
425             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
426
427   // Allow matching the BSL instruction pattern with a non-constant operand
428   def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
429                     (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
430           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
431   def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
432                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
433           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
434   def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
435                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
436           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
437   def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
438                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
439           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
440   def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
441                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
442           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
443   def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
444                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
445           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
446   def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
447                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
448           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
449   def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
450                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
451           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
452
453   // Allow matching the llvm.arm.* intrinsics.
454   def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
455                     (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
456             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
457   def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
458                     (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
459             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
460   def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
461                     (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
462             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
463   def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
464                     (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
465             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
466   def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
467                     (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
468             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
469   def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
470                     (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
471             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
472   def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
473                     (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
474             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
475   def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
476                     (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
477             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
478   def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
479                     (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
480             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
481   def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
482                     (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
483             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
484   def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
485                     (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
486             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
487 }
488
489 // Additional patterns for bitwise instruction BSL
490 defm: Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>;
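// BSL computes (Rd & Rn) | (~Rd & Rm): the tied destination supplies the bit
// mask that selects between the two sources, which is exactly what the
// or/and patterns in the multiclass above describe.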
491
492 def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
493                            (Neon_bsl node:$src, node:$Rn, node:$Rm),
494                            [{ (void)N; return false; }]>;
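// Neon_NoBSLop never matches (it always returns false), so BIT and BIF below
// are available to the assembler and disassembler but are never selected from
// the DAG.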
495
496 // Vector Bitwise Insert if True
497
498 def BITvvv_8B  : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64,   v8i8,
499                    0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
500 def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
501                    0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
502
503 // Vector Bitwise Insert if False
504
505 def BIFvvv_8B  : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64,  v8i8,
506                                 0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
507 def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
508                                 0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
509
510 // Vector Absolute Difference and Accumulate (Signed, Unsigned)
511
512 def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
513                        (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
514 def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
515                        (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
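// These fragments accumulate an absolute difference into the destination,
// e.g. "uaba v0.8b, v1.8b, v2.8b" adds the unsigned |v1 - v2| of each byte
// lane to v0.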
516
517 // Vector Absolute Difference and Accumulate (Unsigned)
518 def UABAvvv_8B :  NeonI_3VSame_Constraint_impl<"uaba", ".8b",  VPR64,  v8i8,
519                     0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
520 def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
521                     0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
522 def UABAvvv_4H :  NeonI_3VSame_Constraint_impl<"uaba", ".4h",  VPR64,  v4i16,
523                     0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
524 def UABAvvv_8H :  NeonI_3VSame_Constraint_impl<"uaba", ".8h",  VPR128, v8i16,
525                     0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
526 def UABAvvv_2S :  NeonI_3VSame_Constraint_impl<"uaba", ".2s",  VPR64,  v2i32,
527                     0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
528 def UABAvvv_4S :  NeonI_3VSame_Constraint_impl<"uaba", ".4s",  VPR128, v4i32,
529                     0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
530
531 // Vector Absolute Difference and Accumulate (Signed)
532 def SABAvvv_8B :  NeonI_3VSame_Constraint_impl<"saba", ".8b",  VPR64,  v8i8,
533                     0b0, 0b0, 0b00, 0b01111, Neon_saba>;
534 def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
535                     0b1, 0b0, 0b00, 0b01111, Neon_saba>;
536 def SABAvvv_4H :  NeonI_3VSame_Constraint_impl<"saba", ".4h",  VPR64,  v4i16,
537                     0b0, 0b0, 0b01, 0b01111, Neon_saba>;
538 def SABAvvv_8H :  NeonI_3VSame_Constraint_impl<"saba", ".8h",  VPR128, v8i16,
539                     0b1, 0b0, 0b01, 0b01111, Neon_saba>;
540 def SABAvvv_2S :  NeonI_3VSame_Constraint_impl<"saba", ".2s",  VPR64,  v2i32,
541                     0b0, 0b0, 0b10, 0b01111, Neon_saba>;
542 def SABAvvv_4S :  NeonI_3VSame_Constraint_impl<"saba", ".4s",  VPR128, v4i32,
543                     0b1, 0b0, 0b10, 0b01111, Neon_saba>;
544
545
546 // Vector Absolute Difference (Signed, Unsigned)
547 defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
548 defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
549
550 // Vector Absolute Difference (Floating Point)
551 defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
552                                     int_arm_neon_vabds, int_arm_neon_vabds,
553                                     int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
554
555 // Vector Reciprocal Step (Floating Point)
556 defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
557                                        int_arm_neon_vrecps, int_arm_neon_vrecps,
558                                        int_arm_neon_vrecps,
559                                        v2f32, v4f32, v2f64, 0>;
560
561 // Vector Reciprocal Square Root Step (Floating Point)
562 defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
563                                         int_arm_neon_vrsqrts,
564                                         int_arm_neon_vrsqrts,
565                                         int_arm_neon_vrsqrts,
566                                         v2f32, v4f32, v2f64, 0>;
567
568 // Vector Comparisons
569
570 def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
571                         (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
572 def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
573                          (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
574 def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
575                         (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
576 def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
577                         (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
578 def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
579                         (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
580
581 // NeonI_compare_aliases class: swaps register operands to implement
582 // comparison aliases, e.g., CMLE is an alias for CMGE with operands reversed.
583 class NeonI_compare_aliases<string asmop, string asmlane,
584                             Instruction inst, RegisterOperand VPRC>
585   : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
586                     ", $Rm" # asmlane,
587                   (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
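// For example, "cmls v0.8b, v1.8b, v2.8b" is accepted and encoded as
// "cmhs v0.8b, v2.8b, v1.8b" via CMLSvvv_8B below.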
588
589 // Vector Comparisons (Integer)
590
591 // Vector Compare Mask Equal (Integer)
592 let isCommutable = 1 in {
593 defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
594 }
595
596 // Vector Compare Mask Higher or Same (Unsigned Integer)
597 defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
598
599 // Vector Compare Mask Greater Than or Equal (Integer)
600 defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
601
602 // Vector Compare Mask Higher (Unsigned Integer)
603 defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
604
605 // Vector Compare Mask Greater Than (Integer)
606 defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
607
608 // Vector Compare Mask Bitwise Test (Integer)
609 defm CMTSTvvv:  NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
610
611 // Vector Compare Mask Less or Same (Unsigned Integer)
612 // CMLS is alias for CMHS with operands reversed.
613 def CMLSvvv_8B  : NeonI_compare_aliases<"cmls", ".8b",  CMHSvvv_8B,  VPR64>;
614 def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
615 def CMLSvvv_4H  : NeonI_compare_aliases<"cmls", ".4h",  CMHSvvv_4H,  VPR64>;
616 def CMLSvvv_8H  : NeonI_compare_aliases<"cmls", ".8h",  CMHSvvv_8H,  VPR128>;
617 def CMLSvvv_2S  : NeonI_compare_aliases<"cmls", ".2s",  CMHSvvv_2S,  VPR64>;
618 def CMLSvvv_4S  : NeonI_compare_aliases<"cmls", ".4s",  CMHSvvv_4S,  VPR128>;
619 def CMLSvvv_2D  : NeonI_compare_aliases<"cmls", ".2d",  CMHSvvv_2D,  VPR128>;
620
621 // Vector Compare Mask Less Than or Equal (Integer)
622 // CMLE is alias for CMGE with operands reversed.
623 def CMLEvvv_8B  : NeonI_compare_aliases<"cmle", ".8b",  CMGEvvv_8B,  VPR64>;
624 def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
625 def CMLEvvv_4H  : NeonI_compare_aliases<"cmle", ".4h",  CMGEvvv_4H,  VPR64>;
626 def CMLEvvv_8H  : NeonI_compare_aliases<"cmle", ".8h",  CMGEvvv_8H,  VPR128>;
627 def CMLEvvv_2S  : NeonI_compare_aliases<"cmle", ".2s",  CMGEvvv_2S,  VPR64>;
628 def CMLEvvv_4S  : NeonI_compare_aliases<"cmle", ".4s",  CMGEvvv_4S,  VPR128>;
629 def CMLEvvv_2D  : NeonI_compare_aliases<"cmle", ".2d",  CMGEvvv_2D,  VPR128>;
630
631 // Vector Compare Mask Lower (Unsigned Integer)
632 // CMLO is alias for CMHI with operands reversed.
633 def CMLOvvv_8B  : NeonI_compare_aliases<"cmlo", ".8b",  CMHIvvv_8B,  VPR64>;
634 def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
635 def CMLOvvv_4H  : NeonI_compare_aliases<"cmlo", ".4h",  CMHIvvv_4H,  VPR64>;
636 def CMLOvvv_8H  : NeonI_compare_aliases<"cmlo", ".8h",  CMHIvvv_8H,  VPR128>;
637 def CMLOvvv_2S  : NeonI_compare_aliases<"cmlo", ".2s",  CMHIvvv_2S,  VPR64>;
638 def CMLOvvv_4S  : NeonI_compare_aliases<"cmlo", ".4s",  CMHIvvv_4S,  VPR128>;
639 def CMLOvvv_2D  : NeonI_compare_aliases<"cmlo", ".2d",  CMHIvvv_2D,  VPR128>;
640
641 // Vector Compare Mask Less Than (Integer)
642 // CMLT is alias for CMGT with operands reversed.
643 def CMLTvvv_8B  : NeonI_compare_aliases<"cmlt", ".8b",  CMGTvvv_8B,  VPR64>;
644 def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
645 def CMLTvvv_4H  : NeonI_compare_aliases<"cmlt", ".4h",  CMGTvvv_4H,  VPR64>;
646 def CMLTvvv_8H  : NeonI_compare_aliases<"cmlt", ".8h",  CMGTvvv_8H,  VPR128>;
647 def CMLTvvv_2S  : NeonI_compare_aliases<"cmlt", ".2s",  CMGTvvv_2S,  VPR64>;
648 def CMLTvvv_4S  : NeonI_compare_aliases<"cmlt", ".4s",  CMGTvvv_4S,  VPR128>;
649 def CMLTvvv_2D  : NeonI_compare_aliases<"cmlt", ".2d",  CMGTvvv_2D,  VPR128>;
650
651
652 def neon_uimm0_asmoperand : AsmOperandClass
653 {
654   let Name = "UImm0";
655   let PredicateMethod = "isUImm<0>";
656   let RenderMethod = "addImmOperands";
657 }
658
659 def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
660   let ParserMatchClass = neon_uimm0_asmoperand;
661   let PrintMethod = "printNeonUImm0Operand";
662
663 }
664
665 multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
666 {
667   def _8B :  NeonI_2VMisc<0b0, u, 0b00, opcode,
668              (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
669              asmop # "\t$Rd.8b, $Rn.8b, $Imm",
670              [(set (v8i8 VPR64:$Rd),
671                 (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
672              NoItinerary>;
673
674   def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
675              (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
676              asmop # "\t$Rd.16b, $Rn.16b, $Imm",
677              [(set (v16i8 VPR128:$Rd),
678                 (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
679              NoItinerary>;
680
681   def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
682             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
683             asmop # "\t$Rd.4h, $Rn.4h, $Imm",
684             [(set (v4i16 VPR64:$Rd),
685                (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
686             NoItinerary>;
687
688   def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
689             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
690             asmop # "\t$Rd.8h, $Rn.8h, $Imm",
691             [(set (v8i16 VPR128:$Rd),
692                (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
693             NoItinerary>;
694
695   def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
696             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
697             asmop # "\t$Rd.2s, $Rn.2s, $Imm",
698             [(set (v2i32 VPR64:$Rd),
699                (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
700             NoItinerary>;
701
702   def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
703             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
704             asmop # "\t$Rd.4s, $Rn.4s, $Imm",
705             [(set (v4i32 VPR128:$Rd),
706                (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
707             NoItinerary>;
708
709   def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
710             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
711             asmop # "\t$Rd.2d, $Rn.2d, $Imm",
712             [(set (v2i64 VPR128:$Rd),
713                (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
714             NoItinerary>;
715 }
716
717 // Vector Compare Mask Equal to Zero (Integer)
718 defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
719
720 // Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
721 defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
722
723 // Vector Compare Mask Greater Than Zero (Signed Integer)
724 defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
725
726 // Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
727 defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
728
729 // Vector Compare Mask Less Than Zero (Signed Integer)
730 defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
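// For example, "cmge v0.4s, v1.4s, #0" sets each lane of v0 to all ones where
// the corresponding lane of v1 is >= 0 (signed) and to all zeros otherwise.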
731
732 // Vector Comparisons (Floating Point)
733
734 // Vector Compare Mask Equal (Floating Point)
735 let isCommutable = 1 in {
736 defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
737                                       Neon_cmeq, Neon_cmeq,
738                                       v2i32, v4i32, v2i64, 0>;
739 }
740
741 // Vector Compare Mask Greater Than Or Equal (Floating Point)
742 defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
743                                       Neon_cmge, Neon_cmge,
744                                       v2i32, v4i32, v2i64, 0>;
745
746 // Vector Compare Mask Greater Than (Floating Point)
747 defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
748                                       Neon_cmgt, Neon_cmgt,
749                                       v2i32, v4i32, v2i64, 0>;
750
751 // Vector Compare Mask Less Than Or Equal (Floating Point)
752 // FCMLE is alias for FCMGE with operands reversed.
753 def FCMLEvvv_2S  : NeonI_compare_aliases<"fcmle", ".2s",  FCMGEvvv_2S,  VPR64>;
754 def FCMLEvvv_4S  : NeonI_compare_aliases<"fcmle", ".4s",  FCMGEvvv_4S,  VPR128>;
755 def FCMLEvvv_2D  : NeonI_compare_aliases<"fcmle", ".2d",  FCMGEvvv_2D,  VPR128>;
756
757 // Vector Compare Mask Less Than (Floating Point)
758 // FCMLT is alias for FCMGT with operands reversed.
759 def FCMLTvvv_2S  : NeonI_compare_aliases<"fcmlt", ".2s",  FCMGTvvv_2S,  VPR64>;
760 def FCMLTvvv_4S  : NeonI_compare_aliases<"fcmlt", ".4s",  FCMGTvvv_4S,  VPR128>;
761 def FCMLTvvv_2D  : NeonI_compare_aliases<"fcmlt", ".2d",  FCMGTvvv_2D,  VPR128>;
762
763
764 multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
765                               string asmop, CondCode CC>
766 {
767   def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
768             (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
769             asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
770             [(set (v2i32 VPR64:$Rd),
771                (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))],
772             NoItinerary>;
773
774   def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
775             (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
776             asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
777             [(set (v4i32 VPR128:$Rd),
778                (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
779             NoItinerary>;
780
781   def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
782             (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
783             asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
784             [(set (v2i64 VPR128:$Rd),
785                (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
786             NoItinerary>;
787 }
788
789 // Vector Compare Mask Equal to Zero (Floating Point)
790 defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
791
792 // Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
793 defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
794
795 // Vector Compare Mask Greater Than Zero (Floating Point)
796 defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
797
798 // Vector Compare Mask Less Than or Equal To Zero (Floating Point)
799 defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
800
801 // Vector Compare Mask Less Than Zero (Floating Point)
802 defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
803
804 // Vector Absolute Comparisons (Floating Point)
805
806 // Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
807 defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
808                                       int_arm_neon_vacged, int_arm_neon_vacgeq,
809                                       int_aarch64_neon_vacgeq,
810                                       v2i32, v4i32, v2i64, 0>;
811
812 // Vector Absolute Compare Mask Greater Than (Floating Point)
813 defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
814                                       int_arm_neon_vacgtd, int_arm_neon_vacgtq,
815                                       int_aarch64_neon_vacgtq,
816                                       v2i32, v4i32, v2i64, 0>;
817
818 // Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
819 // FACLE is alias for FACGE with operands reversed.
820 def FACLEvvv_2S  : NeonI_compare_aliases<"facle", ".2s",  FACGEvvv_2S,  VPR64>;
821 def FACLEvvv_4S  : NeonI_compare_aliases<"facle", ".4s",  FACGEvvv_4S,  VPR128>;
822 def FACLEvvv_2D  : NeonI_compare_aliases<"facle", ".2d",  FACGEvvv_2D,  VPR128>;
823
824 // Vector Absolute Compare Mask Less Than (Floating Point)
825 // FACLT is alias for FACGT with operands reversed.
826 def FACLTvvv_2S  : NeonI_compare_aliases<"faclt", ".2s",  FACGTvvv_2S,  VPR64>;
827 def FACLTvvv_4S  : NeonI_compare_aliases<"faclt", ".4s",  FACGTvvv_4S,  VPR128>;
828 def FACLTvvv_2D  : NeonI_compare_aliases<"faclt", ".2d",  FACGTvvv_2D,  VPR128>;
829
830 // Vector halving add (Integer Signed, Unsigned)
831 defm SHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
832                                         int_arm_neon_vhadds, 1>;
833 defm UHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
834                                         int_arm_neon_vhaddu, 1>;
835
836 // Vector halving sub (Integer Signed, Unsigned)
837 defm SHSUBvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
838                                         int_arm_neon_vhsubs, 0>;
839 defm UHSUBvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
840                                         int_arm_neon_vhsubu, 0>;
841
842 // Vector rounding halving add (Integer Signed, Unsigned)
843 defm SRHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
844                                          int_arm_neon_vrhadds, 1>;
845 defm URHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
846                                          int_arm_neon_vrhaddu, 1>;
847
848 // Vector Saturating add (Integer Signed, Unsigned)
849 defm SQADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
850                    int_arm_neon_vqadds, 1>;
851 defm UQADDvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
852                    int_arm_neon_vqaddu, 1>;
853
854 // Vector Saturating sub (Integer Signed, Unsigned)
855 defm SQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
856                    int_arm_neon_vqsubs, 1>;
857 defm UQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
858                    int_arm_neon_vqsubu, 1>;
859
860 // Vector Shift Left (Signed and Unsigned Integer)
861 defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
862                  int_arm_neon_vshifts, 1>;
863 defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
864                  int_arm_neon_vshiftu, 1>;
865
866 // Vector Saturating Shift Left (Signed and Unsigned Integer)
867 defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
868                   int_arm_neon_vqshifts, 1>;
869 defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
870                   int_arm_neon_vqshiftu, 1>;
871
872 // Vector Rounding Shift Left (Signed and Unsigned Integer)
873 defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
874                   int_arm_neon_vrshifts, 1>;
875 defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
876                   int_arm_neon_vrshiftu, 1>;
877
878 // Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
879 defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
880                    int_arm_neon_vqrshifts, 1>;
881 defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
882                    int_arm_neon_vqrshiftu, 1>;
883
884 // Vector Maximum (Signed and Unsigned Integer)
885 defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
886 defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
887
888 // Vector Minimum (Signed and Unsigned Integer)
889 defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
890 defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
891
892 // Vector Maximum (Floating Point)
893 defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
894                                      int_arm_neon_vmaxs, int_arm_neon_vmaxs,
895                                      int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;
896
897 // Vector Minimum (Floating Point)
898 defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
899                                      int_arm_neon_vmins, int_arm_neon_vmins,
900                                      int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;
901
902 // Vector maxNum (Floating Point) - prefer a number over a quiet NaN
903 defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
904                                        int_aarch64_neon_vmaxnm,
905                                        int_aarch64_neon_vmaxnm,
906                                        int_aarch64_neon_vmaxnm,
907                                        v2f32, v4f32, v2f64, 1>;
908
909 // Vector minNum (Floating Point) - prefer a number over a quiet NaN
910 defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
911                                        int_aarch64_neon_vminnm,
912                                        int_aarch64_neon_vminnm,
913                                        int_aarch64_neon_vminnm,
914                                        v2f32, v4f32, v2f64, 1>;
915
916 // Vector Maximum Pairwise (Signed and Unsigned Integer)
917 defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
918 defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
919
920 // Vector Minimum Pairwise (Signed and Unsigned Integer)
921 defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
922 defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
923
924 // Vector Maximum Pairwise (Floating Point)
925 defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
926                                      int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
927                                      int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
928
929 // Vector Minimum Pairwise (Floating Point)
930 defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
931                                      int_arm_neon_vpmins, int_arm_neon_vpmins,
932                                      int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
933
934 // Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
935 defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
936                                        int_aarch64_neon_vpmaxnm,
937                                        int_aarch64_neon_vpmaxnm,
938                                        int_aarch64_neon_vpmaxnm,
939                                        v2f32, v4f32, v2f64, 1>;
940
941 // Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
942 defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
943                                        int_aarch64_neon_vpminnm,
944                                        int_aarch64_neon_vpminnm,
945                                        int_aarch64_neon_vpminnm,
946                                        v2f32, v4f32, v2f64, 1>;
947
948 // Vector Addition Pairwise (Integer)
949 defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
950
951 // Vector Addition Pairwise (Floating Point)
952 defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
953                                        int_arm_neon_vpadd,
954                                        int_arm_neon_vpadd,
955                                        int_arm_neon_vpadd,
956                                        v2f32, v4f32, v2f64, 1>;
957
958 // Vector Saturating Doubling Multiply High
959 defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
960                     int_arm_neon_vqdmulh, 1>;
961
962 // Vector Saturating Rounding Doubling Multiply High
963 defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
964                      int_arm_neon_vqrdmulh, 1>;
965
966 // Vector Multiply Extended (Floating Point)
967 defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
968                                       int_aarch64_neon_vmulx,
969                                       int_aarch64_neon_vmulx,
970                                       int_aarch64_neon_vmulx,
971                                       v2f32, v4f32, v2f64, 1>;
972
973 // Vector Immediate Instructions
974
975 multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
976 {
977   def _asmoperand : AsmOperandClass
978     {
979       let Name = "NeonMovImmShift" # PREFIX;
980       let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
981       let PredicateMethod = "isNeonMovImmShift" # PREFIX;
982     }
983 }
984
985 // Definition of vector immediate shift operands
986
987 // The selectable use-cases extract the shift operation
988 // information from the OpCmode field encoded in the immediate.
989 def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
990   uint64_t OpCmode = N->getZExtValue();
991   unsigned ShiftImm;
992   unsigned ShiftOnesIn;
993   unsigned HasShift =
994     A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
995   if (!HasShift) return SDValue();
996   return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
997 }]>;
998
999 // Vector immediate shift operands that accept the LSL and MSL shift
1000 // operators, with shift values of 0, 8, 16 or 24 (LSL), 0 or 8 (LSLH),
1001 // or 8 or 16 (MSL).
1002 defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
1003 defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
1004 // LSLH restricts the shift amount to 0 or 8 (out of 0, 8, 16, 24).
1005 defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
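// For example, "movi v0.4s, #0xab, lsl #16" uses an LSL operand and
// "movi v0.4s, #0xab, msl #8" uses an MSL (shift-ones-in) operand.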
1006
1007 multiclass neon_mov_imm_shift_operands<string PREFIX,
1008                                        string HALF, string ISHALF, code pred>
1009 {
1010    def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
1011     {
1012       let PrintMethod =
1013         "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1014       let DecoderMethod =
1015         "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1016       let ParserMatchClass =
1017         !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
1018     }
1019 }
1020
1021 defm neon_mov_imm_LSL  : neon_mov_imm_shift_operands<"LSL", "", "false", [{
1022   unsigned ShiftImm;
1023   unsigned ShiftOnesIn;
1024   unsigned HasShift =
1025     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1026   return (HasShift && !ShiftOnesIn);
1027 }]>;
1028
1029 defm neon_mov_imm_MSL  : neon_mov_imm_shift_operands<"MSL", "", "false", [{
1030   unsigned ShiftImm;
1031   unsigned ShiftOnesIn;
1032   unsigned HasShift =
1033     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1034   return (HasShift && ShiftOnesIn);
1035 }]>;
1036
1037 defm neon_mov_imm_LSLH  : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
1038   unsigned ShiftImm;
1039   unsigned ShiftOnesIn;
1040   unsigned HasShift =
1041     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1042   return (HasShift && !ShiftOnesIn);
1043 }]>;
1044
1045 def neon_uimm1_asmoperand : AsmOperandClass
1046 {
1047   let Name = "UImm1";
1048   let PredicateMethod = "isUImm<1>";
1049   let RenderMethod = "addImmOperands";
1050 }
1051
1052 def neon_uimm2_asmoperand : AsmOperandClass
1053 {
1054   let Name = "UImm2";
1055   let PredicateMethod = "isUImm<2>";
1056   let RenderMethod = "addImmOperands";
1057 }
1058
1059 def neon_uimm8_asmoperand : AsmOperandClass
1060 {
1061   let Name = "UImm8";
1062   let PredicateMethod = "isUImm<8>";
1063   let RenderMethod = "addImmOperands";
1064 }
1065
1066 def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1067   let ParserMatchClass = neon_uimm8_asmoperand;
1068   let PrintMethod = "printUImmHexOperand";
1069 }
1070
1071 def neon_uimm64_mask_asmoperand : AsmOperandClass
1072 {
1073   let Name = "NeonUImm64Mask";
1074   let PredicateMethod = "isNeonUImm64Mask";
1075   let RenderMethod = "addNeonUImm64MaskOperands";
1076 }
1077
1078 // MCOperand for a 64-bit bytemask in which each byte is either 0x00 or
1079 // 0xff; the mask is encoded as an unsigned 8-bit value, one bit per byte.
1080 def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1081   let ParserMatchClass = neon_uimm64_mask_asmoperand;
1082   let PrintMethod = "printNeonUImm64MaskOperand";
1083 }
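// For example, a mask of 0x00ff00ff00ff00ff would be carried as the 8-bit
// value 0x55, with bit i of the value selecting byte i of the mask.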
1084
1085 multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
1086                                    SDPatternOperator opnode>
1087 {
1088     // shift zeros, per word
1089     def _2S  : NeonI_1VModImm<0b0, op,
1090                               (outs VPR64:$Rd),
1091                               (ins neon_uimm8:$Imm,
1092                                 neon_mov_imm_LSL_operand:$Simm),
1093                               !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1094                               [(set (v2i32 VPR64:$Rd),
1095                                  (v2i32 (opnode (timm:$Imm),
1096                                    (neon_mov_imm_LSL_operand:$Simm))))],
1097                               NoItinerary> {
1098        bits<2> Simm;
1099        let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1100      }
1101
1102     def _4S  : NeonI_1VModImm<0b1, op,
1103                               (outs VPR128:$Rd),
1104                               (ins neon_uimm8:$Imm,
1105                                 neon_mov_imm_LSL_operand:$Simm),
1106                               !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1107                               [(set (v4i32 VPR128:$Rd),
1108                                  (v4i32 (opnode (timm:$Imm),
1109                                    (neon_mov_imm_LSL_operand:$Simm))))],
1110                               NoItinerary> {
1111       bits<2> Simm;
1112       let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1113     }
1114
1115     // shift zeros, per halfword
1116     def _4H  : NeonI_1VModImm<0b0, op,
1117                               (outs VPR64:$Rd),
1118                               (ins neon_uimm8:$Imm,
1119                                 neon_mov_imm_LSLH_operand:$Simm),
1120                               !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
1121                               [(set (v4i16 VPR64:$Rd),
1122                                  (v4i16 (opnode (timm:$Imm),
1123                                    (neon_mov_imm_LSLH_operand:$Simm))))],
1124                               NoItinerary> {
1125       bit  Simm;
1126       let cmode = {0b1, 0b0, Simm, 0b0};
1127     }
1128
1129     def _8H  : NeonI_1VModImm<0b1, op,
1130                               (outs VPR128:$Rd),
1131                               (ins neon_uimm8:$Imm,
1132                                 neon_mov_imm_LSLH_operand:$Simm),
1133                               !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
1134                               [(set (v8i16 VPR128:$Rd),
1135                                  (v8i16 (opnode (timm:$Imm),
1136                                    (neon_mov_imm_LSLH_operand:$Simm))))],
1137                               NoItinerary> {
1138       bit Simm;
1139       let cmode = {0b1, 0b0, Simm, 0b0};
1140      }
1141 }
1142
1143 multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
1144                                                    SDPatternOperator opnode,
1145                                                    SDPatternOperator neonopnode>
1146 {
1147   let Constraints = "$src = $Rd" in {
1148     // shift zeros, per word
1149     def _2S  : NeonI_1VModImm<0b0, op,
1150                  (outs VPR64:$Rd),
1151                  (ins VPR64:$src, neon_uimm8:$Imm,
1152                    neon_mov_imm_LSL_operand:$Simm),
1153                  !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1154                  [(set (v2i32 VPR64:$Rd),
1155                     (v2i32 (opnode (v2i32 VPR64:$src),
1156                       (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
1157                         neon_mov_imm_LSL_operand:$Simm)))))))],
1158                  NoItinerary> {
1159       bits<2> Simm;
1160       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1161     }
1162
1163     def _4S  : NeonI_1VModImm<0b1, op,
1164                  (outs VPR128:$Rd),
1165                  (ins VPR128:$src, neon_uimm8:$Imm,
1166                    neon_mov_imm_LSL_operand:$Simm),
1167                  !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1168                  [(set (v4i32 VPR128:$Rd),
1169                     (v4i32 (opnode (v4i32 VPR128:$src),
1170                       (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
1171                         neon_mov_imm_LSL_operand:$Simm)))))))],
1172                  NoItinerary> {
1173       bits<2> Simm;
1174       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1175     }
1176
1177     // shift zeros, per halfword
1178     def _4H  : NeonI_1VModImm<0b0, op,
1179                  (outs VPR64:$Rd),
1180                  (ins VPR64:$src, neon_uimm8:$Imm,
1181                    neon_mov_imm_LSLH_operand:$Simm),
1182                  !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
1183                  [(set (v4i16 VPR64:$Rd),
1184                     (v4i16 (opnode (v4i16 VPR64:$src),
1185                        (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
1186                           neon_mov_imm_LSLH_operand:$Simm)))))))],
1187                  NoItinerary> {
1188       bit  Simm;
1189       let cmode = {0b1, 0b0, Simm, 0b1};
1190     }
1191
1192     def _8H  : NeonI_1VModImm<0b1, op,
1193                  (outs VPR128:$Rd),
1194                  (ins VPR128:$src, neon_uimm8:$Imm,
1195                    neon_mov_imm_LSLH_operand:$Simm),
1196                  !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
1197                  [(set (v8i16 VPR128:$Rd),
1198                     (v8i16 (opnode (v8i16 VPR128:$src),
1199                       (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
1200                         neon_mov_imm_LSLH_operand:$Simm)))))))],
1201                  NoItinerary> {
1202       bit Simm;
1203       let cmode = {0b1, 0b0, Simm, 0b1};
1204     }
1205   }
1206 }
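
// Summary sketch of the cmode encodings set by the 'let cmode = ...' lines in
// the two multiclasses above (illustration only; the helper and its names are
// assumptions, not part of this backend). EncShift is the encoded LSL amount
// and ModifiesDest is true for the BIC/ORR forms that also read $src.
//
//   unsigned moviLslCmode(bool HalfwordLanes, unsigned EncShift,
//                         bool ModifiesDest) {
//     // 32-bit lanes: cmode = {0, s1, s0, m}; 16-bit lanes: cmode = {1, 0, s0, m}
//     unsigned Base = HalfwordLanes ? 0x8 : 0x0;
//     return Base | (EncShift << 1) | (ModifiesDest ? 1 : 0);
//   }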
1207
1208 multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
1209                                    SDPatternOperator opnode>
1210 {
1211     // shift ones, per word
1212     def _2S  : NeonI_1VModImm<0b0, op,
1213                              (outs VPR64:$Rd),
1214                              (ins neon_uimm8:$Imm,
1215                                neon_mov_imm_MSL_operand:$Simm),
1216                              !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1217                               [(set (v2i32 VPR64:$Rd),
1218                                  (v2i32 (opnode (timm:$Imm),
1219                                    (neon_mov_imm_MSL_operand:$Simm))))],
1220                              NoItinerary> {
1221        bit Simm;
1222        let cmode = {0b1, 0b1, 0b0, Simm};
1223      }
1224
1225    def _4S  : NeonI_1VModImm<0b1, op,
1226                               (outs VPR128:$Rd),
1227                               (ins neon_uimm8:$Imm,
1228                                 neon_mov_imm_MSL_operand:$Simm),
1229                               !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1230                               [(set (v4i32 VPR128:$Rd),
1231                                  (v4i32 (opnode (timm:$Imm),
1232                                    (neon_mov_imm_MSL_operand:$Simm))))],
1233                               NoItinerary> {
1234      bit Simm;
1235      let cmode = {0b1, 0b1, 0b0, Simm};
1236    }
1237 }
1238
1239 // Vector Move Immediate Shifted
1240 let isReMaterializable = 1 in {
1241 defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
1242 }
1243
1244 // Vector Move Inverted Immediate Shifted
1245 let isReMaterializable = 1 in {
1246 defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
1247 }
1248
1249 // Vector Bitwise Bit Clear (AND NOT) - immediate
1250 let isReMaterializable = 1 in {
1251 defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
1252                                                          and, Neon_mvni>;
1253 }
1254
1255 // Vector Bitwise OR - immediate
1256
1257 let isReMaterializable = 1 in {
1258 defm ORRvi_lsl   : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
1259                                                            or, Neon_movi>;
1260 }
1261
1262 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1263 // LowerBUILD_VECTOR favors lowering MOVI over MVNI.
1264 // Selecting the BIC immediate instructions therefore requires additional
1265 // patterns to transform Neon_movi operands into BIC immediate operands.
1266
1267 def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
1268   uint64_t OpCmode = N->getZExtValue();
1269   unsigned ShiftImm;
1270   unsigned ShiftOnesIn;
1271   (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1272   // LSLH restricts the shift amount to 0 or 8, which are encoded as 0 and 1.
1273   // Transform encoded shift amount 0 to 1 and 1 to 0.
1274   return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
1275 }]>;
1276
1277 def neon_mov_imm_LSLH_transform_operand
1278   : ImmLeaf<i32, [{
1279     unsigned ShiftImm;
1280     unsigned ShiftOnesIn;
1281     unsigned HasShift =
1282       A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1283     return (HasShift && !ShiftOnesIn); }],
1284   neon_mov_imm_LSLH_transform_XFORM>;
1285
1286 // Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0xff, LSL 8)
1287 // Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0xff)
1288 def : Pat<(v4i16 (and VPR64:$src,
1289             (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1290           (BICvi_lsl_4H VPR64:$src, 255,
1291             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1292
1293 // Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0xff, LSL 8)
1294 // Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0xff)
1295 def : Pat<(v8i16 (and VPR128:$src,
1296             (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1297           (BICvi_lsl_8H VPR128:$src, 255,
1298             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1299
1300
1301 multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
1302                                    SDPatternOperator neonopnode,
1303                                    Instruction INST4H,
1304                                    Instruction INST8H> {
1305   def : Pat<(v8i8 (opnode VPR64:$src,
1306                     (bitconvert(v4i16 (neonopnode timm:$Imm,
1307                       neon_mov_imm_LSLH_operand:$Simm))))),
1308             (INST4H VPR64:$src, neon_uimm8:$Imm,
1309               neon_mov_imm_LSLH_operand:$Simm)>;
1310   def : Pat<(v1i64 (opnode VPR64:$src,
1311                   (bitconvert(v4i16 (neonopnode timm:$Imm,
1312                     neon_mov_imm_LSLH_operand:$Simm))))),
1313           (INST4H VPR64:$src, neon_uimm8:$Imm,
1314             neon_mov_imm_LSLH_operand:$Simm)>;
1315
1316   def : Pat<(v16i8 (opnode VPR128:$src,
1317                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1318                      neon_mov_imm_LSLH_operand:$Simm))))),
1319           (INST8H VPR128:$src, neon_uimm8:$Imm,
1320             neon_mov_imm_LSLH_operand:$Simm)>;
1321   def : Pat<(v4i32 (opnode VPR128:$src,
1322                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1323                      neon_mov_imm_LSLH_operand:$Simm))))),
1324           (INST8H VPR128:$src, neon_uimm8:$Imm,
1325             neon_mov_imm_LSLH_operand:$Simm)>;
1326   def : Pat<(v2i64 (opnode VPR128:$src,
1327                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1328                      neon_mov_imm_LSLH_operand:$Simm))))),
1329           (INST8H VPR128:$src, neon_uimm8:$Imm,
1330             neon_mov_imm_LSLH_operand:$Simm)>;
1331 }
1332
1333 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1334 defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
1335
1336 // Additional patterns for Vector Bitwise OR - immediate
1337 defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
1338
1339
1340 // Vector Move Immediate Masked
1341 let isReMaterializable = 1 in {
1342 defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
1343 }
1344
1345 // Vector Move Inverted Immediate Masked
1346 let isReMaterializable = 1 in {
1347 defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
1348 }
1349
1350 class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
1351                                 Instruction inst, RegisterOperand VPRC>
1352   : NeonInstAlias<!strconcat(asmop, "\t$Rd" # asmlane # ", $Imm"),
1353                         (inst VPRC:$Rd, neon_uimm8:$Imm,  0), 0b0>;
1354
1355 // Aliases for Vector Move Immediate Shifted
1356 def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
1357 def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
1358 def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
1359 def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
1360
1361 // Aliases for Vector Move Inverted Immediate Shifted
1362 def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
1363 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
1364 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
1365 def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
1366
1367 // Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
1368 def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
1369 def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
1370 def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
1371 def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
1372
1373 // Aliases for Vector Bitwise OR - immediate
1374 def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
1375 def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
1376 def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
1377 def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
1378
1379 //  Vector Move Immediate - per byte
1380 let isReMaterializable = 1 in {
1381 def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
1382                                (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
1383                                "movi\t$Rd.8b, $Imm",
1384                                [(set (v8i8 VPR64:$Rd),
1385                                   (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1386                                 NoItinerary> {
1387   let cmode = 0b1110;
1388 }
1389
1390 def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
1391                                 (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
1392                                 "movi\t$Rd.16b, $Imm",
1393                                 [(set (v16i8 VPR128:$Rd),
1394                                    (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1395                                  NoItinerary> {
1396   let cmode = 0b1110;
1397 }
1398 }
1399
1400 // Vector Move Immediate - bytemask, per double word
1401 let isReMaterializable = 1 in {
1402 def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
1403                                (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
1404                                "movi\t$Rd.2d, $Imm",
1405                                [(set (v2i64 VPR128:$Rd),
1406                                   (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1407                                NoItinerary> {
1408   let cmode = 0b1110;
1409 }
1410 }
1411
1412 // Vector Move Immediate - bytemask, one doubleword
1413
1414 let isReMaterializable = 1 in {
1415 def MOVIdi : NeonI_1VModImm<0b0, 0b1,
1416                            (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
1417                            "movi\t$Rd, $Imm",
1418                            [(set (f64 FPR64:$Rd),
1419                               (f64 (bitconvert
1420                                 (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))))],
1421                            NoItinerary> {
1422   let cmode = 0b1110;
1423 }
1424 }
1425
1426 // Vector Floating Point Move Immediate
1427
1428 class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
1429                       Operand immOpType, bit q, bit op>
1430   : NeonI_1VModImm<q, op,
1431                    (outs VPRC:$Rd), (ins immOpType:$Imm),
1432                    "fmov\t$Rd" # asmlane # ", $Imm",
1433                    [(set (OpTy VPRC:$Rd),
1434                       (OpTy (Neon_fmovi (timm:$Imm))))],
1435                    NoItinerary> {
1436      let cmode = 0b1111;
1437    }
1438
1439 let isReMaterializable = 1 in {
1440 def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64,  v2f32, fmov32_operand, 0b0, 0b0>;
1441 def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
1442 def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
1443 }
1444
1445 // Vector Shift (Immediate) 
1446 // Immediate in [0, 63]
1447 def imm0_63 : Operand<i32> {
1448   let ParserMatchClass = uimm6_asmoperand;
1449 }
1450
1451 // Shift Right/Left Immediate - The immh:immb field of these shifts is encoded
1452 // as follows:
1453 //
1454 //    Offset    Encoding
1455 //     8        immh:immb<6:3> = '0001xxx', <imm> is encoded in immh:immb<2:0>
1456 //     16       immh:immb<6:4> = '001xxxx', <imm> is encoded in immh:immb<3:0>
1457 //     32       immh:immb<6:5> = '01xxxxx', <imm> is encoded in immh:immb<4:0>
1458 //     64       immh:immb<6>   = '1xxxxxx', <imm> is encoded in immh:immb<5:0>
1459 //
1460 // The shift right immediate amount, in the range 1 to element bits, is computed
1461 // as (2 * Offset) - UInt(immh:immb).  The shift left immediate amount, in the
1462 // range 0 to element bits - 1, is computed as UInt(immh:immb) - Offset.
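//
// A small sketch of that arithmetic (illustration only, with assumed helper
// names; the real hooks are the getShiftRightImm*/DecodeShiftRightImm* methods
// referenced below), where Offset is the element size in bits and V is the
// 7-bit value UInt(immh:immb):
//
//   // Shift right: an amount in [1, Offset] is stored as 2*Offset - amount.
//   unsigned encodeShrImm(unsigned Offset, unsigned Amount) {
//     return 2 * Offset - Amount;    // Offset=8, Amount=3 -> immh:immb=0b0001101
//   }
//   unsigned decodeShrImm(unsigned Offset, unsigned V) { return 2 * Offset - V; }
//
//   // Shift left: an amount in [0, Offset - 1] is stored as Offset + amount.
//   unsigned encodeShlImm(unsigned Offset, unsigned Amount) {
//     return Offset + Amount;        // Offset=8, Amount=3 -> immh:immb=0b0001011
//   }
//   unsigned decodeShlImm(unsigned Offset, unsigned V) { return V - Offset; }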
1463
1464 class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
1465   let Name = "ShrImm" # OFFSET;
1466   let RenderMethod = "addImmOperands";
1467   let DiagnosticType = "ShrImm" # OFFSET;
1468 }
1469
1470 class shr_imm<string OFFSET> : Operand<i32> {
1471   let EncoderMethod = "getShiftRightImm" # OFFSET;
1472   let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
1473   let ParserMatchClass = 
1474     !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
1475 }
1476
1477 def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
1478 def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
1479 def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
1480 def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
1481
1482 def shr_imm8 : shr_imm<"8">;
1483 def shr_imm16 : shr_imm<"16">;
1484 def shr_imm32 : shr_imm<"32">;
1485 def shr_imm64 : shr_imm<"64">;
1486
1487 class shl_imm_asmoperands<string OFFSET> : AsmOperandClass {
1488   let Name = "ShlImm" # OFFSET;
1489   let RenderMethod = "addImmOperands";
1490   let DiagnosticType = "ShlImm" # OFFSET;
1491 }
1492
1493 class shl_imm<string OFFSET> : Operand<i32> {
1494   let EncoderMethod = "getShiftLeftImm" # OFFSET;
1495   let DecoderMethod = "DecodeShiftLeftImm" # OFFSET;
1496   let ParserMatchClass = 
1497     !cast<AsmOperandClass>("shl_imm" # OFFSET # "_asmoperand");
1498 }
1499
1500 def shl_imm8_asmoperand : shl_imm_asmoperands<"8">;
1501 def shl_imm16_asmoperand : shl_imm_asmoperands<"16">;
1502 def shl_imm32_asmoperand : shl_imm_asmoperands<"32">;
1503 def shl_imm64_asmoperand : shl_imm_asmoperands<"64">;
1504
1505 def shl_imm8 : shl_imm<"8">;
1506 def shl_imm16 : shl_imm<"16">;
1507 def shl_imm32 : shl_imm<"32">;
1508 def shl_imm64 : shl_imm<"64">;
1509
1510 class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
1511                RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
1512   : NeonI_2VShiftImm<q, u, opcode,
1513                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1514                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1515                      [(set (Ty VPRC:$Rd),
1516                         (Ty (OpNode (Ty VPRC:$Rn),
1517                           (Ty (Neon_vdup (i32 imm:$Imm))))))],
1518                      NoItinerary>;
1519
1520 multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
1521   // 64-bit vector types.
1522   def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3, shl> {
1523     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1524   }
1525
1526   def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4, shl> {
1527     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1528   }
1529
1530   def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5, shl> {
1531     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1532   }
1533
1534   // 128-bit vector types.
1535   def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3, shl> {
1536     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1537   }
1538
1539   def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4, shl> {
1540     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1541   }
1542
1543   def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5, shl> {
1544     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1545   }
1546
1547   def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63, shl> {
1548     let Inst{22} = 0b1;        // immh:immb = 1xxxxxx
1549   }
1550 }
1551
1552 multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1553   def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1554                      OpNode> {
1555     let Inst{22-19} = 0b0001;
1556   }
1557
1558   def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1559                      OpNode> {
1560     let Inst{22-20} = 0b001;
1561   }
1562
1563   def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1564                      OpNode> {
1565      let Inst{22-21} = 0b01;
1566   }
1567
1568   def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1569                       OpNode> {
1570     let Inst{22-19} = 0b0001;
1571   }
1572
1573   def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1574                      OpNode> {
1575     let Inst{22-20} = 0b001;
1576   }
1577
1578   def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1579                      OpNode> {
1580     let Inst{22-21} = 0b01;
1581   }
1582
1583   def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1584                      OpNode> {
1585     let Inst{22} = 0b1;
1586   }
1587 }
1588
1589 // Shift left
1590 defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
1591
1592 // Shift right
1593 defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
1594 defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
1595
1596 def Neon_High16B : PatFrag<(ops node:$in),
1597                            (extract_subvector (v16i8 node:$in), (iPTR 8))>;
1598 def Neon_High8H  : PatFrag<(ops node:$in),
1599                            (extract_subvector (v8i16 node:$in), (iPTR 4))>;
1600 def Neon_High4S  : PatFrag<(ops node:$in),
1601                            (extract_subvector (v4i32 node:$in), (iPTR 2))>;
1602 def Neon_High2D  : PatFrag<(ops node:$in),
1603                            (extract_subvector (v2i64 node:$in), (iPTR 1))>;
1604 def Neon_High4float : PatFrag<(ops node:$in),
1605                                (extract_subvector (v4f32 node:$in), (iPTR 2))>;
1606 def Neon_High2double : PatFrag<(ops node:$in),
1607                                (extract_subvector (v2f64 node:$in), (iPTR 1))>;
1608
1609 def Neon_Low16B : PatFrag<(ops node:$in),
1610                           (v8i8 (extract_subvector (v16i8 node:$in),
1611                                                    (iPTR 0)))>;
1612 def Neon_Low8H : PatFrag<(ops node:$in),
1613                          (v4i16 (extract_subvector (v8i16 node:$in),
1614                                                    (iPTR 0)))>;
1615 def Neon_Low4S : PatFrag<(ops node:$in),
1616                          (v2i32 (extract_subvector (v4i32 node:$in),
1617                                                    (iPTR 0)))>;
1618 def Neon_Low2D : PatFrag<(ops node:$in),
1619                          (v1i64 (extract_subvector (v2i64 node:$in),
1620                                                    (iPTR 0)))>;
1621 def Neon_Low4float : PatFrag<(ops node:$in),
1622                              (v2f32 (extract_subvector (v4f32 node:$in),
1623                                                        (iPTR 0)))>;
1624 def Neon_Low2double : PatFrag<(ops node:$in),
1625                               (v1f64 (extract_subvector (v2f64 node:$in),
1626                                                         (iPTR 0)))>;
1627
1628 def neon_uimm3_shift : Operand<i32>,
1629                          ImmLeaf<i32, [{return Imm < 8;}]> {
1630   let ParserMatchClass = uimm3_asmoperand;
1631 }
1632
1633 def neon_uimm4_shift : Operand<i32>,
1634                          ImmLeaf<i32, [{return Imm < 16;}]> {
1635   let ParserMatchClass = uimm4_asmoperand;
1636 }
1637
1638 def neon_uimm5_shift : Operand<i32>,
1639                          ImmLeaf<i32, [{return Imm < 32;}]> {
1640   let ParserMatchClass = uimm5_asmoperand;
1641 }
1642
1643 class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1644                    string SrcT, ValueType DestTy, ValueType SrcTy,
1645                    Operand ImmTy, SDPatternOperator ExtOp>
1646   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1647                      (ins VPR64:$Rn, ImmTy:$Imm),
1648                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1649                      [(set (DestTy VPR128:$Rd),
1650                         (DestTy (shl
1651                           (DestTy (ExtOp (SrcTy VPR64:$Rn))),
1652                             (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
1653                      NoItinerary>;
1654
1655 class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1656                        string SrcT, ValueType DestTy, ValueType SrcTy,
1657                        int StartIndex, Operand ImmTy,
1658                        SDPatternOperator ExtOp, PatFrag getTop>
1659   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1660                      (ins VPR128:$Rn, ImmTy:$Imm),
1661                      asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1662                      [(set (DestTy VPR128:$Rd),
1663                         (DestTy (shl
1664                           (DestTy (ExtOp
1665                             (SrcTy (getTop VPR128:$Rn)))),
1666                               (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
1667                      NoItinerary>;
1668
1669 multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
1670                          SDNode ExtOp> {
1671   // 64-bit vector types.
1672   def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
1673                          neon_uimm3_shift, ExtOp> {
1674     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1675   }
1676
1677   def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
1678                          neon_uimm4_shift, ExtOp> {
1679     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1680   }
1681
1682   def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
1683                          neon_uimm5_shift, ExtOp> {
1684     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1685   }
1686
1687   // 128-bit vector types
1688   def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b", v8i16, v8i8,
1689                               8, neon_uimm3_shift, ExtOp, Neon_High16B> {
1690     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1691   }
1692
1693   def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h", v4i32, v4i16,
1694                              4, neon_uimm4_shift, ExtOp, Neon_High8H> {
1695     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1696   }
1697
1698   def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s", v2i64, v2i32,
1699                              2, neon_uimm5_shift, ExtOp, Neon_High4S> {
1700     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1701   }
1702
1703   // Use other patterns to match when the immediate is 0.
1704   def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
1705             (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
1706
1707   def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
1708             (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
1709
1710   def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
1711             (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
1712
1713   def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
1714             (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
1715
1716   def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
1717             (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
1718
1719   def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
1720             (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
1721 }
1722
1723 // Shift left long
1724 defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
1725 defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
1726
1727 // Rounding/Saturating shift
1728 class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
1729                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1730                   SDPatternOperator OpNode>
1731   : NeonI_2VShiftImm<q, u, opcode,
1732                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1733                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1734                      [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
1735                         (i32 imm:$Imm))))],
1736                      NoItinerary>;
1737
1738 // shift right (vector by immediate)
1739 multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
1740                            SDPatternOperator OpNode> {
1741   def _8B  : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1742                          OpNode> {
1743     let Inst{22-19} = 0b0001;
1744   }
1745
1746   def _4H  : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1747                          OpNode> {
1748     let Inst{22-20} = 0b001;
1749   }
1750
1751   def _2S  : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1752                          OpNode> {
1753     let Inst{22-21} = 0b01;
1754   }
1755
1756   def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1757                          OpNode> {
1758     let Inst{22-19} = 0b0001;
1759   }
1760
1761   def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1762                         OpNode> {
1763     let Inst{22-20} = 0b001;
1764   }
1765
1766   def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1767                         OpNode> {
1768     let Inst{22-21} = 0b01;
1769   }
1770
1771   def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1772                         OpNode> {
1773     let Inst{22} = 0b1;
1774   }
1775 }
1776
1777 multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
1778                           SDPatternOperator OpNode> {
1779   // 64-bit vector types.
1780   def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3,
1781                         OpNode> {
1782     let Inst{22-19} = 0b0001;
1783   }
1784
1785   def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4,
1786                         OpNode> {
1787     let Inst{22-20} = 0b001;
1788   }
1789
1790   def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5,
1791                         OpNode> {
1792     let Inst{22-21} = 0b01;
1793   }
1794
1795   // 128-bit vector types.
1796   def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3,
1797                          OpNode> {
1798     let Inst{22-19} = 0b0001;
1799   }
1800
1801   def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4,
1802                         OpNode> {
1803     let Inst{22-20} = 0b001;
1804   }
1805
1806   def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5,
1807                         OpNode> {
1808     let Inst{22-21} = 0b01;
1809   }
1810
1811   def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63,
1812                         OpNode> {
1813     let Inst{22} = 0b1;
1814   }
1815 }
1816
1817 // Rounding shift right
1818 defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
1819                                 int_aarch64_neon_vsrshr>;
1820 defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
1821                                 int_aarch64_neon_vurshr>;
1822
1823 // Saturating shift left unsigned
1824 defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
1825
1826 // Saturating shift left
1827 defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
1828 defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
1829
1830 class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
1831                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1832                   SDNode OpNode>
1833   : NeonI_2VShiftImm<q, u, opcode,
1834            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1835            asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1836            [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
1837               (Ty (OpNode (Ty VPRC:$Rn),
1838                 (Ty (Neon_vdup (i32 imm:$Imm))))))))],
1839            NoItinerary> {
1840   let Constraints = "$src = $Rd";
1841 }
1842
1843 // Shift Right accumulate
1844 multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1845   def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1846                         OpNode> {
1847     let Inst{22-19} = 0b0001;
1848   }
1849
1850   def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1851                         OpNode> {
1852     let Inst{22-20} = 0b001;
1853   }
1854
1855   def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1856                         OpNode> {
1857     let Inst{22-21} = 0b01;
1858   }
1859
1860   def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1861                          OpNode> {
1862     let Inst{22-19} = 0b0001;
1863   }
1864
1865   def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1866                         OpNode> {
1867     let Inst{22-20} = 0b001;
1868   }
1869
1870   def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1871                         OpNode> {
1872     let Inst{22-21} = 0b01;
1873   }
1874
1875   def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1876                         OpNode> {
1877     let Inst{22} = 0b1;
1878   }
1879 }
1880
1881 // Shift right and accumulate
1882 defm SSRAvvi    : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
1883 defm USRAvvi    : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
1884
1885 // Rounding shift accumulate
1886 class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
1887                     RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1888                     SDPatternOperator OpNode>
1889   : NeonI_2VShiftImm<q, u, opcode,
1890                      (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1891                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1892                      [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
1893                         (Ty (OpNode (Ty VPRC:$Rn), (i32 imm:$Imm))))))],
1894                      NoItinerary> {
1895   let Constraints = "$src = $Rd";
1896 }
1897
1898 multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
1899                              SDPatternOperator OpNode> {
1900   def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1901                           OpNode> {
1902     let Inst{22-19} = 0b0001;
1903   }
1904
1905   def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1906                           OpNode> {
1907     let Inst{22-20} = 0b001;
1908   }
1909
1910   def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1911                           OpNode> {
1912     let Inst{22-21} = 0b01;
1913   }
1914
1915   def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1916                            OpNode> {
1917     let Inst{22-19} = 0b0001;
1918   }
1919
1920   def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1921                           OpNode> {
1922     let Inst{22-20} = 0b001;
1923   }
1924
1925   def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1926                           OpNode> {
1927     let Inst{22-21} = 0b01;
1928   }
1929
1930   def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1931                           OpNode> {
1932     let Inst{22} = 0b1;
1933   }
1934 }
1935
1936 // Rounding shift right and accumulate
1937 defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
1938 defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
1939
1940 // Shift insert by immediate
1941 class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
1942                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1943                   SDPatternOperator OpNode>
1944     : NeonI_2VShiftImm<q, u, opcode,
1945            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1946            asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1947            [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
1948              (i32 imm:$Imm))))],
1949            NoItinerary> {
1950   let Constraints = "$src = $Rd";
1951 }
1952
1953 // shift left insert (vector by immediate)
1954 multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
1955   def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3,
1956                         int_aarch64_neon_vsli> {
1957     let Inst{22-19} = 0b0001;
1958   }
1959
1960   def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4,
1961                         int_aarch64_neon_vsli> {
1962     let Inst{22-20} = 0b001;
1963   }
1964
1965   def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5,
1966                         int_aarch64_neon_vsli> {
1967     let Inst{22-21} = 0b01;
1968   }
1969
1970     // 128-bit vector types
1971   def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3,
1972                          int_aarch64_neon_vsli> {
1973     let Inst{22-19} = 0b0001;
1974   }
1975
1976   def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4,
1977                         int_aarch64_neon_vsli> {
1978     let Inst{22-20} = 0b001;
1979   }
1980
1981   def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5,
1982                         int_aarch64_neon_vsli> {
1983     let Inst{22-21} = 0b01;
1984   }
1985
1986   def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63,
1987                         int_aarch64_neon_vsli> {
1988     let Inst{22} = 0b1;
1989   }
1990 }
1991
1992 // shift right insert (vector by immediate)
1993 multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
1994     // 64-bit vector types.
1995   def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1996                         int_aarch64_neon_vsri> {
1997     let Inst{22-19} = 0b0001;
1998   }
1999
2000   def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
2001                         int_aarch64_neon_vsri> {
2002     let Inst{22-20} = 0b001;
2003   }
2004
2005   def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
2006                         int_aarch64_neon_vsri> {
2007     let Inst{22-21} = 0b01;
2008   }
2009
2010     // 128-bit vector types
2011   def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
2012                          int_aarch64_neon_vsri> {
2013     let Inst{22-19} = 0b0001;
2014   }
2015
2016   def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
2017                         int_aarch64_neon_vsri> {
2018     let Inst{22-20} = 0b001;
2019   }
2020
2021   def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
2022                         int_aarch64_neon_vsri> {
2023     let Inst{22-21} = 0b01;
2024   }
2025
2026   def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2027                         int_aarch64_neon_vsri> {
2028     let Inst{22} = 0b1;
2029   }
2030 }
2031
2032 // Shift left and insert
2033 defm SLIvvi   : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
2034
2035 // Shift right and insert
2036 defm SRIvvi   : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
2037
2038 class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
2039                     string SrcT, Operand ImmTy>
2040   : NeonI_2VShiftImm<q, u, opcode,
2041                      (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
2042                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
2043                      [], NoItinerary>;
2044
2045 class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
2046                        string SrcT, Operand ImmTy>
2047   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
2048                      (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
2049                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
2050                      [], NoItinerary> {
2051   let Constraints = "$src = $Rd";
2052 }
2053
2054 // shift right narrow (vector by immediate)
2055 multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
2056   def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
2057     let Inst{22-19} = 0b0001;
2058   }
2059
2060   def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
2061     let Inst{22-20} = 0b001;
2062   }
2063
2064   def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
2065     let Inst{22-21} = 0b01;
2066   }
2067
2068   // Shift Narrow High
2069   def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
2070                               shr_imm8> {
2071     let Inst{22-19} = 0b0001;
2072   }
2073
2074   def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
2075                              shr_imm16> {
2076     let Inst{22-20} = 0b001;
2077   }
2078
2079   def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
2080                              shr_imm32> {
2081     let Inst{22-21} = 0b01;
2082   }
2083 }
2084
2085 // Shift right narrow
2086 defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
2087
2088 // Shift right narrow (prefix Q is saturating, prefix R is rounding)
2089 defm QSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
2090 defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
2091 defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
2092 defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
2093 defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
2094 defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
2095 defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
2096
2097 def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
2098                               (v2i64 (concat_vectors (v1i64 node:$Rm),
2099                                                      (v1i64 node:$Rn)))>;
2100 def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
2101                               (v8i16 (concat_vectors (v4i16 node:$Rm),
2102                                                      (v4i16 node:$Rn)))>;
2103 def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
2104                               (v4i32 (concat_vectors (v2i32 node:$Rm),
2105                                                      (v2i32 node:$Rn)))>;
2106 def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
2107                               (v4f32 (concat_vectors (v2f32 node:$Rm),
2108                                                      (v2f32 node:$Rn)))>;
2109 def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
2110                               (v2f64 (concat_vectors (v1f64 node:$Rm),
2111                                                      (v1f64 node:$Rn)))>;
2112
2113 def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2114                              (v8i16 (srl (v8i16 node:$lhs),
2115                                (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
2116 def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2117                              (v4i32 (srl (v4i32 node:$lhs),
2118                                (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
2119 def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2120                              (v2i64 (srl (v2i64 node:$lhs),
2121                                (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
2122 def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2123                              (v8i16 (sra (v8i16 node:$lhs),
2124                                (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
2125 def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2126                              (v4i32 (sra (v4i32 node:$lhs),
2127                                (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
2128 def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2129                              (v2i64 (sra (v2i64 node:$lhs),
2130                                (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
2131
2132 // Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
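// An illustrative source-level idiom (an assumption, using Clang vector
// extensions, not something this file defines) that typically produces that
// (sra, trunc) shape for the patterns below:
//
//   typedef short v8i16_t __attribute__((vector_size(16)));
//   typedef signed char v8i8_t __attribute__((vector_size(8)));
//   v8i8_t shrn_s16(v8i16_t a) {
//     v8i16_t s = a >> 3;                         // sra by a splatted constant
//     return __builtin_convertvector(s, v8i8_t);  // truncate each lane
//   }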
2133 multiclass Neon_shiftNarrow_patterns<string shr> {
2134   def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
2135               (i32 imm:$Imm)))),
2136             (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
2137   def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
2138               (i32 imm:$Imm)))),
2139             (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
2140   def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
2141               (i32 imm:$Imm)))),
2142             (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
2143
2144   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2145               (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
2146                 VPR128:$Rn, (i32 imm:$Imm))))))),
2147             (SHRNvvi_16B (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
2148                          VPR128:$Rn, imm:$Imm)>;
2149   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2150               (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
2151                 VPR128:$Rn, (i32 imm:$Imm))))))),
2152             (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2153                         VPR128:$Rn, imm:$Imm)>;
2154   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2155               (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
2156                 VPR128:$Rn, (i32 imm:$Imm))))))),
2157             (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2158                         VPR128:$Rn, imm:$Imm)>;
2159 }
2160
2161 multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
2162   def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm)),
2163             (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
2164   def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm)),
2165             (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
2166   def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm)),
2167             (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
2168
2169   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2170                 (v1i64 (bitconvert (v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm))))),
2171             (!cast<Instruction>(prefix # "_16B")
2172                 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2173                 VPR128:$Rn, imm:$Imm)>;
2174   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2175                 (v1i64 (bitconvert (v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm))))),
2176             (!cast<Instruction>(prefix # "_8H")
2177                 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2178                 VPR128:$Rn, imm:$Imm)>;
2179   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2180                 (v1i64 (bitconvert (v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm))))),
2181             (!cast<Instruction>(prefix # "_4S")
2182                   (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2183                   VPR128:$Rn, imm:$Imm)>;
2184 }
2185
2186 defm : Neon_shiftNarrow_patterns<"lshr">;
2187 defm : Neon_shiftNarrow_patterns<"ashr">;
2188
2189 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
2190 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
2191 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
2192 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
2193 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
2194 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
2195 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
2196
2197 // Convert between fixed-point and floating-point
2198 class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
2199                 RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
2200                 Operand ImmTy, SDPatternOperator IntOp>
2201   : NeonI_2VShiftImm<q, u, opcode,
2202                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
2203                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2204                      [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
2205                        (i32 imm:$Imm))))],
2206                      NoItinerary>;
2207
2208 multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
2209                               SDPatternOperator IntOp> {
2210   def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
2211                       shr_imm32, IntOp> {
2212     let Inst{22-21} = 0b01;
2213   }
2214
2215   def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
2216                       shr_imm32, IntOp> {
2217     let Inst{22-21} = 0b01;
2218   }
2219
2220   def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
2221                       shr_imm64, IntOp> {
2222     let Inst{22} = 0b1;
2223   }
2224 }
2225
2226 multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
2227                               SDPatternOperator IntOp> {
2228   def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
2229                       shr_imm32, IntOp> {
2230     let Inst{22-21} = 0b01;
2231   }
2232
2233   def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
2234                       shr_imm32, IntOp> {
2235     let Inst{22-21} = 0b01;
2236   }
2237
2238   def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
2239                       shr_imm64, IntOp> {
2240     let Inst{22} = 0b1;
2241   }
2242 }
2243
2244 // Convert fixed-point to floating-point
2245 defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
2246                                    int_arm_neon_vcvtfxs2fp>;
2247 defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
2248                                    int_arm_neon_vcvtfxu2fp>;
2249
2250 // Convert floating-point to fixed-point
2251 defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
2252                                    int_arm_neon_vcvtfp2fxs>;
2253 defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
2254                                    int_arm_neon_vcvtfp2fxu>;
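
// For reference, the ACLE intrinsics that typically correspond to these
// fixed-point conversions (a frontend-mapping assumption, not defined here):
//
//   #include <arm_neon.h>
//   float32x2_t fx_to_fp(int32x2_t a)   { return vcvt_n_f32_s32(a, 8); }  // scvtf #8
//   int32x2_t   fp_to_fx(float32x2_t a) { return vcvt_n_s32_f32(a, 8); }  // fcvtzs #8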
2255
2256 multiclass Neon_sshll2_0<SDNode ext>
2257 {
2258   def _v8i8  : PatFrag<(ops node:$Rn),
2259                        (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
2260   def _v4i16 : PatFrag<(ops node:$Rn),
2261                        (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
2262   def _v2i32 : PatFrag<(ops node:$Rn),
2263                        (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
2264 }
2265
2266 defm NI_sext_high : Neon_sshll2_0<sext>;
2267 defm NI_zext_high : Neon_sshll2_0<zext>;
2268
2269
2270 //===----------------------------------------------------------------------===//
2271 // Multiclasses for NeonI_Across
2272 //===----------------------------------------------------------------------===//
2273
2274 // Variant 1
2275
2276 multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
2277                             string asmop, SDPatternOperator opnode>
2278 {
2279     def _1h8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
2280                 (outs FPR16:$Rd), (ins VPR64:$Rn),
2281                 asmop # "\t$Rd, $Rn.8b",
2282                 [(set (v1i16 FPR16:$Rd),
2283                     (v1i16 (opnode (v8i8 VPR64:$Rn))))],
2284                 NoItinerary>;
2285
2286     def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2287                 (outs FPR16:$Rd), (ins VPR128:$Rn),
2288                 asmop # "\t$Rd, $Rn.16b",
2289                 [(set (v1i16 FPR16:$Rd),
2290                     (v1i16 (opnode (v16i8 VPR128:$Rn))))],
2291                 NoItinerary>;
2292
2293     def _1s4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
2294                 (outs FPR32:$Rd), (ins VPR64:$Rn),
2295                 asmop # "\t$Rd, $Rn.4h",
2296                 [(set (v1i32 FPR32:$Rd),
2297                     (v1i32 (opnode (v4i16 VPR64:$Rn))))],
2298                 NoItinerary>;
2299
2300     def _1s8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
2301                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2302                 asmop # "\t$Rd, $Rn.8h",
2303                 [(set (v1i32 FPR32:$Rd),
2304                     (v1i32 (opnode (v8i16 VPR128:$Rn))))],
2305                 NoItinerary>;
2306
2307     // _1d2s doesn't exist!
2308
2309     def _1d4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
2310                 (outs FPR64:$Rd), (ins VPR128:$Rn),
2311                 asmop # "\t$Rd, $Rn.4s",
2312                 [(set (v1i64 FPR64:$Rd),
2313                     (v1i64 (opnode (v4i32 VPR128:$Rn))))],
2314                 NoItinerary>;
2315 }
2316
2317 defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
2318 defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
2319
2320 // Variant 2
2321
2322 multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
2323                             string asmop, SDPatternOperator opnode>
2324 {
2325     def _1b8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
2326                 (outs FPR8:$Rd), (ins VPR64:$Rn),
2327                 asmop # "\t$Rd, $Rn.8b",
2328                 [(set (v1i8 FPR8:$Rd),
2329                     (v1i8 (opnode (v8i8 VPR64:$Rn))))],
2330                 NoItinerary>;
2331
2332     def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2333                 (outs FPR8:$Rd), (ins VPR128:$Rn),
2334                 asmop # "\t$Rd, $Rn.16b",
2335                 [(set (v1i8 FPR8:$Rd),
2336                     (v1i8 (opnode (v16i8 VPR128:$Rn))))],
2337                 NoItinerary>;
2338
2339     def _1h4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
2340                 (outs FPR16:$Rd), (ins VPR64:$Rn),
2341                 asmop # "\t$Rd, $Rn.4h",
2342                 [(set (v1i16 FPR16:$Rd),
2343                     (v1i16 (opnode (v4i16 VPR64:$Rn))))],
2344                 NoItinerary>;
2345
2346     def _1h8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
2347                 (outs FPR16:$Rd), (ins VPR128:$Rn),
2348                 asmop # "\t$Rd, $Rn.8h",
2349                 [(set (v1i16 FPR16:$Rd),
2350                     (v1i16 (opnode (v8i16 VPR128:$Rn))))],
2351                 NoItinerary>;
2352
2353     // _1s2s doesn't exist!
2354
2355     def _1s4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
2356                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2357                 asmop # "\t$Rd, $Rn.4s",
2358                 [(set (v1i32 FPR32:$Rd),
2359                     (v1i32 (opnode (v4i32 VPR128:$Rn))))],
2360                 NoItinerary>;
2361 }
2362
2363 defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
2364 defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
2365
2366 defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
2367 defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
2368
2369 defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
2370
2371 // Variant 3
2372
2373 multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
2374                             string asmop, SDPatternOperator opnode> {
2375     def _1s4s:  NeonI_2VAcross<0b1, u, size, opcode,
2376                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2377                 asmop # "\t$Rd, $Rn.4s",
2378                 [(set (v1f32 FPR32:$Rd),
2379                     (v1f32 (opnode (v4f32 VPR128:$Rn))))],
2380                 NoItinerary>;
2381 }
2382
2383 defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
2384                                 int_aarch64_neon_vmaxnmv>;
2385 defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
2386                                 int_aarch64_neon_vminnmv>;
2387
2388 defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
2389                               int_aarch64_neon_vmaxv>;
2390 defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
2391                               int_aarch64_neon_vminv>;
2392
2393 // The following definitions are for the instruction class (Perm)
2394
2395 class NeonI_Permute<bit q, bits<2> size, bits<3> opcode,
2396                     string asmop, RegisterOperand OpVPR, string OpS,
2397                     SDPatternOperator opnode, ValueType Ty>
2398   : NeonI_Perm<q, size, opcode,
2399                (outs OpVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2400                asmop # "\t$Rd." # OpS # ", $Rn." # OpS # ", $Rm." # OpS,
2401                [(set (Ty OpVPR:$Rd),
2402                   (Ty (opnode (Ty OpVPR:$Rn), (Ty OpVPR:$Rm))))],
2403                NoItinerary>;
2404
2405 multiclass NeonI_Perm_pat<bits<3> opcode, string asmop,
2406                           SDPatternOperator opnode> {
2407   def _8b  : NeonI_Permute<0b0, 0b00, opcode, asmop,
2408                            VPR64, "8b", opnode, v8i8>;
2409   def _16b : NeonI_Permute<0b1, 0b00, opcode, asmop,
2410                            VPR128, "16b", opnode, v16i8>;
2411   def _4h  : NeonI_Permute<0b0, 0b01, opcode, asmop,
2412                            VPR64, "4h", opnode, v4i16>;
2413   def _8h  : NeonI_Permute<0b1, 0b01, opcode, asmop,
2414                            VPR128, "8h", opnode, v8i16>;
2415   def _2s  : NeonI_Permute<0b0, 0b10, opcode, asmop,
2416                            VPR64, "2s", opnode, v2i32>;
2417   def _4s  : NeonI_Permute<0b1, 0b10, opcode, asmop,
2418                            VPR128, "4s", opnode, v4i32>;
2419   def _2d  : NeonI_Permute<0b1, 0b11, opcode, asmop,
2420                            VPR128, "2d", opnode, v2i64>;
2421 }
2422
2423 defm UZP1vvv : NeonI_Perm_pat<0b001, "uzp1", Neon_uzp1>;
2424 defm TRN1vvv : NeonI_Perm_pat<0b010, "trn1", Neon_trn1>;
2425 defm ZIP1vvv : NeonI_Perm_pat<0b011, "zip1", Neon_zip1>;
2426 defm UZP2vvv : NeonI_Perm_pat<0b101, "uzp2", Neon_uzp2>;
2427 defm TRN2vvv : NeonI_Perm_pat<0b110, "trn2", Neon_trn2>;
2428 defm ZIP2vvv : NeonI_Perm_pat<0b111, "zip2", Neon_zip2>;
2429
2430 multiclass NeonI_Perm_float_pat<string INS, SDPatternOperator opnode> {
2431   def : Pat<(v2f32 (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
2432             (!cast<Instruction>(INS # "_2s") VPR64:$Rn, VPR64:$Rm)>;
2433
2434   def : Pat<(v4f32 (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
2435             (!cast<Instruction>(INS # "_4s") VPR128:$Rn, VPR128:$Rm)>;
2436
2437   def : Pat<(v2f64 (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
2438             (!cast<Instruction>(INS # "_2d") VPR128:$Rn, VPR128:$Rm)>;
2439 }
2440
2441 defm : NeonI_Perm_float_pat<"UZP1vvv", Neon_uzp1>;
2442 defm : NeonI_Perm_float_pat<"UZP2vvv", Neon_uzp2>;
2443 defm : NeonI_Perm_float_pat<"ZIP1vvv", Neon_zip1>;
2444 defm : NeonI_Perm_float_pat<"ZIP2vvv", Neon_zip2>;
2445 defm : NeonI_Perm_float_pat<"TRN1vvv", Neon_trn1>;
2446 defm : NeonI_Perm_float_pat<"TRN2vvv", Neon_trn2>;
2447
2448 // The following definitions are for the instruction class (3V Diff)
2449
2450 // normal long/long2 pattern
2451 class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
2452                  string asmop, string ResS, string OpS,
2453                  SDPatternOperator opnode, SDPatternOperator ext,
2454                  RegisterOperand OpVPR,
2455                  ValueType ResTy, ValueType OpTy>
2456   : NeonI_3VDiff<q, u, size, opcode,
2457                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2458                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2459                  [(set (ResTy VPR128:$Rd),
2460                     (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
2461                                    (ResTy (ext (OpTy OpVPR:$Rm))))))],
2462                  NoItinerary>;
2463
2464 multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
2465                         string asmop, SDPatternOperator opnode,
2466                         bit Commutable = 0> {
2467   let isCommutable = Commutable in {
2468     def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2469                            opnode, sext, VPR64, v8i16, v8i8>;
2470     def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2471                            opnode, sext, VPR64, v4i32, v4i16>;
2472     def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2473                            opnode, sext, VPR64, v2i64, v2i32>;
2474   }
2475 }
2476
2477 multiclass NeonI_3VDL2_s<bit u, bits<4> opcode, string asmop,
2478                          SDPatternOperator opnode, bit Commutable = 0> {
2479   let isCommutable = Commutable in {
2480     def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2481                             opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2482     def _4s8h  : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2483                             opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2484     def _2d4s  : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2485                             opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2486   }
2487 }
2488
2489 multiclass NeonI_3VDL_u<bit u, bits<4> opcode, string asmop,
2490                         SDPatternOperator opnode, bit Commutable = 0> {
2491   let isCommutable = Commutable in {
2492     def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2493                            opnode, zext, VPR64, v8i16, v8i8>;
2494     def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2495                            opnode, zext, VPR64, v4i32, v4i16>;
2496     def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2497                            opnode, zext, VPR64, v2i64, v2i32>;
2498   }
2499 }
2500
2501 multiclass NeonI_3VDL2_u<bit u, bits<4> opcode, string asmop,
2502                          SDPatternOperator opnode, bit Commutable = 0> {
2503   let isCommutable = Commutable in {
2504     def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2505                             opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2506     def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2507                            opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2508     def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2509                            opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2510   }
2511 }
2512
2513 defm SADDLvvv :  NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
2514 defm UADDLvvv :  NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
2515
2516 defm SADDL2vvv :  NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
2517 defm UADDL2vvv :  NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
2518
2519 defm SSUBLvvv :  NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
2520 defm USUBLvvv :  NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
2521
2522 defm SSUBL2vvv :  NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
2523 defm USUBL2vvv :  NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
2524
2525 // normal wide/wide2 pattern
2526 class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
2527                  string asmop, string ResS, string OpS,
2528                  SDPatternOperator opnode, SDPatternOperator ext,
2529                  RegisterOperand OpVPR,
2530                  ValueType ResTy, ValueType OpTy>
2531   : NeonI_3VDiff<q, u, size, opcode,
2532                  (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
2533                  asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
2534                  [(set (ResTy VPR128:$Rd),
2535                     (ResTy (opnode (ResTy VPR128:$Rn),
2536                                    (ResTy (ext (OpTy OpVPR:$Rm))))))],
2537                  NoItinerary>;
2538
2539 multiclass NeonI_3VDW_s<bit u, bits<4> opcode, string asmop,
2540                         SDPatternOperator opnode> {
2541   def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2542                          opnode, sext, VPR64, v8i16, v8i8>;
2543   def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2544                          opnode, sext, VPR64, v4i32, v4i16>;
2545   def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2546                          opnode, sext, VPR64, v2i64, v2i32>;
2547 }
2548
2549 defm SADDWvvv :  NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
2550 defm SSUBWvvv :  NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
2551
2552 multiclass NeonI_3VDW2_s<bit u, bits<4> opcode, string asmop,
2553                          SDPatternOperator opnode> {
2554   def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2555                           opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2556   def _4s8h  : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2557                           opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2558   def _2d4s  : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2559                           opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2560 }
2561
2562 defm SADDW2vvv :  NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
2563 defm SSUBW2vvv :  NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
2564
2565 multiclass NeonI_3VDW_u<bit u, bits<4> opcode, string asmop,
2566                         SDPatternOperator opnode> {
2567   def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2568                          opnode, zext, VPR64, v8i16, v8i8>;
2569   def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2570                          opnode, zext, VPR64, v4i32, v4i16>;
2571   def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2572                          opnode, zext, VPR64, v2i64, v2i32>;
2573 }
2574
2575 defm UADDWvvv :  NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
2576 defm USUBWvvv :  NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
2577
2578 multiclass NeonI_3VDW2_u<bit u, bits<4> opcode, string asmop,
2579                          SDPatternOperator opnode> {
2580   def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2581                           opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2582   def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2583                          opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2584   def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2585                          opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2586 }
2587
2588 defm UADDW2vvv :  NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
2589 defm USUBW2vvv :  NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
2590
2591 // Get the high half of each vector element.
2592 multiclass NeonI_get_high {
2593   def _8h : PatFrag<(ops node:$Rn),
2594                     (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
2595                                              (v8i16 (Neon_vdup (i32 8)))))))>;
2596   def _4s : PatFrag<(ops node:$Rn),
2597                     (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
2598                                               (v4i32 (Neon_vdup (i32 16)))))))>;
2599   def _2d : PatFrag<(ops node:$Rn),
2600                     (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
2601                                               (v2i64 (Neon_vdup (i32 32)))))))>;
2602 }
2603
2604 defm NI_get_hi : NeonI_get_high;
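// Illustrative sketch (not from the original source): NI_get_hi_8h matches
// (v8i8 (trunc (srl v8i16:$Rn, splat 8))), i.e. the high byte of every
// halfword lane; the _4s/_2d fragments do the same for 32/64-bit lanes.
// These are exactly the values addhn/subhn keep from a widened add/sub.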
2605
2606 // pattern for addhn/subhn with 2 operands
2607 class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2608                            string asmop, string ResS, string OpS,
2609                            SDPatternOperator opnode, SDPatternOperator get_hi,
2610                            ValueType ResTy, ValueType OpTy>
2611   : NeonI_3VDiff<q, u, size, opcode,
2612                  (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2613                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2614                  [(set (ResTy VPR64:$Rd),
2615                     (ResTy (get_hi
2616                       (OpTy (opnode (OpTy VPR128:$Rn),
2617                                     (OpTy VPR128:$Rm))))))],
2618                  NoItinerary>;
2619
2620 multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode, string asmop,
2621                                 SDPatternOperator opnode, bit Commutable = 0> {
2622   let isCommutable = Commutable in {
2623     def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2624                                      opnode, NI_get_hi_8h, v8i8, v8i16>;
2625     def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2626                                      opnode, NI_get_hi_4s, v4i16, v4i32>;
2627     def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2628                                      opnode, NI_get_hi_2d, v2i32, v2i64>;
2629   }
2630 }
2631
2632 defm ADDHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
2633 defm SUBHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
2634
2635 // pattern for operation with 2 operands
2636 class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2637                     string asmop, string ResS, string OpS,
2638                     SDPatternOperator opnode,
2639                     RegisterOperand ResVPR, RegisterOperand OpVPR,
2640                     ValueType ResTy, ValueType OpTy>
2641   : NeonI_3VDiff<q, u, size, opcode,
2642                  (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2643                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2644                  [(set (ResTy ResVPR:$Rd),
2645                     (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
2646                  NoItinerary>;
2647
2648 // normal narrow pattern
2649 multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode, string asmop,
2650                           SDPatternOperator opnode, bit Commutable = 0> {
2651   let isCommutable = Commutable in {
2652     def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2653                               opnode, VPR64, VPR128, v8i8, v8i16>;
2654     def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2655                               opnode, VPR64, VPR128, v4i16, v4i32>;
2656     def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2657                               opnode, VPR64, VPR128, v2i32, v2i64>;
2658   }
2659 }
2660
2661 defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
2662 defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
2663
2664 // pattern for ACLE intrinsics with 3 operands
2665 class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2666                      string asmop, string ResS, string OpS>
2667   : NeonI_3VDiff<q, u, size, opcode,
2668                  (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
2669                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2670                  [], NoItinerary> {
2671   let Constraints = "$src = $Rd";
2672   let neverHasSideEffects = 1;
2673 }
2674
2675 multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode, string asmop> {
2676   def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
2677   def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
2678   def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
2679 }
2680
2681 defm ADDHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
2682 defm SUBHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
2683
2684 defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
2685 defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
2686
2687 // Patterns have to be separate because there's a SUBREG_TO_REG in the output
2688 // part.
2689 class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
2690                         SDPatternOperator coreop>
2691   : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2692                       (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
2693                                                         (SrcTy VPR128:$Rm)))))),
2694         (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2695               VPR128:$Rn, VPR128:$Rm)>;
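// For example (illustrative, assuming the class above): an addhn2 DAG that
// combines existing low-half data in VPR64:$src with a narrowed high-half add
// selects roughly to
//   (ADDHN2vvv_16b8h (SUBREG_TO_REG (i64 0), $src, sub_64), $Rn, $Rm)
// so the instruction only produces the upper 64 bits of the destination.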
2696
2697 // addhn2 patterns
2698 def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8,  v8i16,
2699           BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
2700 def : NarrowHighHalfPat<ADDHN2vvv_8h4s,  v4i16, v4i32,
2701           BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
2702 def : NarrowHighHalfPat<ADDHN2vvv_4s2d,  v2i32, v2i64,
2703           BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
2704
2705 // subhn2 patterns
2706 def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8,  v8i16,
2707           BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
2708 def : NarrowHighHalfPat<SUBHN2vvv_8h4s,  v4i16, v4i32,
2709           BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
2710 def : NarrowHighHalfPat<SUBHN2vvv_4s2d,  v2i32, v2i64,
2711           BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
2712
2713 // raddhn2 patterns
2714 def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vraddhn>;
2715 def : NarrowHighHalfPat<RADDHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vraddhn>;
2716 def : NarrowHighHalfPat<RADDHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vraddhn>;
2717
2718 // rsubhn2 patterns
2719 def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vrsubhn>;
2720 def : NarrowHighHalfPat<RSUBHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vrsubhn>;
2721 def : NarrowHighHalfPat<RSUBHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vrsubhn>;
2722
2723 // patterns that need to extend the result
2724 class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
2725                      string asmop, string ResS, string OpS,
2726                      SDPatternOperator opnode,
2727                      RegisterOperand OpVPR,
2728                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
2729   : NeonI_3VDiff<q, u, size, opcode,
2730                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2731                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2732                  [(set (ResTy VPR128:$Rd),
2733                     (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
2734                                                 (OpTy OpVPR:$Rm))))))],
2735                  NoItinerary>;
2736
2737 multiclass NeonI_3VDL_zext<bit u, bits<4> opcode, string asmop,
2738                            SDPatternOperator opnode, bit Commutable = 0> {
2739   let isCommutable = Commutable in {
2740     def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2741                                opnode, VPR64, v8i16, v8i8, v8i8>;
2742     def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2743                                opnode, VPR64, v4i32, v4i16, v4i16>;
2744     def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2745                                opnode, VPR64, v2i64, v2i32, v2i32>;
2746   }
2747 }
2748
2749 defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
2750 defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
2751
2752 multiclass NeonI_Op_High<SDPatternOperator op> {
2753   def _16B : PatFrag<(ops node:$Rn, node:$Rm),
2754                      (op (v8i8 (Neon_High16B node:$Rn)),
2755                          (v8i8 (Neon_High16B node:$Rm)))>;
2756   def _8H  : PatFrag<(ops node:$Rn, node:$Rm),
2757                      (op (v4i16 (Neon_High8H node:$Rn)),
2758                          (v4i16 (Neon_High8H node:$Rm)))>;
2759   def _4S  : PatFrag<(ops node:$Rn, node:$Rm),
2760                      (op (v2i32 (Neon_High4S node:$Rn)),
2761                          (v2i32 (Neon_High4S node:$Rm)))>;
2762 }
2763
2764 defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
2765 defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
2766 defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
2767 defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
2768 defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
2769 defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
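// Illustrative example (not from the original source): NI_smull_hi_4S matches
//   (int_arm_neon_vmulls (Neon_High4S $Rn), (Neon_High4S $Rm))
// i.e. a widening multiply of the upper two .s lanes, which is what the
// "2"-suffixed instructions (smull2, sabdl2, ...) below consume.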
2770
2771 multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode, string asmop, string opnode,
2772                             bit Commutable = 0> {
2773   let isCommutable = Commutable in {
2774     def _8h8b  : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2775                                 !cast<PatFrag>(opnode # "_16B"),
2776                                 VPR128, v8i16, v16i8, v8i8>;
2777     def _4s4h  : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2778                                 !cast<PatFrag>(opnode # "_8H"),
2779                                 VPR128, v4i32, v8i16, v4i16>;
2780     def _2d2s  : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2781                                 !cast<PatFrag>(opnode # "_4S"),
2782                                 VPR128, v2i64, v4i32, v2i32>;
2783   }
2784 }
2785
2786 defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
2787 defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
2788
2789 // For patterns that need two operators chained together.
2790 class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
2791                      string asmop, string ResS, string OpS, 
2792                      SDPatternOperator opnode, SDPatternOperator subop,
2793                      RegisterOperand OpVPR,
2794                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
2795   : NeonI_3VDiff<q, u, size, opcode,
2796                  (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
2797                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS, 
2798                  [(set (ResTy VPR128:$Rd),
2799                     (ResTy (opnode
2800                       (ResTy VPR128:$src), 
2801                       (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
2802                                                  (OpTy OpVPR:$Rm))))))))],
2803                  NoItinerary> {
2804   let Constraints = "$src = $Rd";
2805 }
2806
2807 multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode, string asmop,
2808                              SDPatternOperator opnode, SDPatternOperator subop>{
2809   def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2810                              opnode, subop, VPR64, v8i16, v8i8, v8i8>;
2811   def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2812                              opnode, subop, VPR64, v4i32, v4i16, v4i16>;
2813   def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2814                              opnode, subop, VPR64, v2i64, v2i32, v2i32>;
2815 }
2816
2817 defm SABALvvv :  NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
2818                                    add, int_arm_neon_vabds>;
2819 defm UABALvvv :  NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
2820                                    add, int_arm_neon_vabdu>;
2821
2822 multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode, string asmop,
2823                               SDPatternOperator opnode, string subop> {
2824   def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2825                              opnode, !cast<PatFrag>(subop # "_16B"), 
2826                              VPR128, v8i16, v16i8, v8i8>;
2827   def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2828                              opnode, !cast<PatFrag>(subop # "_8H"), 
2829                              VPR128, v4i32, v8i16, v4i16>;
2830   def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2831                              opnode, !cast<PatFrag>(subop # "_4S"), 
2832                              VPR128, v2i64, v4i32, v2i32>;
2833 }
2834
2835 defm SABAL2vvv :  NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
2836                                      "NI_sabdl_hi">;
2837 defm UABAL2vvv :  NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
2838                                      "NI_uabdl_hi">;
2839
2840 // Long pattern with 2 operands
2841 multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode, string asmop,
2842                           SDPatternOperator opnode, bit Commutable = 0> {
2843   let isCommutable = Commutable in {
2844     def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2845                               opnode, VPR128, VPR64, v8i16, v8i8>;
2846     def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2847                               opnode, VPR128, VPR64, v4i32, v4i16>;
2848     def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2849                               opnode, VPR128, VPR64, v2i64, v2i32>;
2850   }
2851 }
2852
2853 defm SMULLvvv :  NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
2854 defm UMULLvvv :  NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
2855
2856 class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
2857                            string asmop, string ResS, string OpS,
2858                            SDPatternOperator opnode,
2859                            ValueType ResTy, ValueType OpTy>
2860   : NeonI_3VDiff<q, u, size, opcode,
2861                  (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2862                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2863                  [(set (ResTy VPR128:$Rd),
2864                     (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
2865                  NoItinerary>;
2866
2867 multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode, string asmop,
2868                                    string opnode, bit Commutable = 0> {
2869   let isCommutable = Commutable in {
2870     def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2871                                       !cast<PatFrag>(opnode # "_16B"),
2872                                       v8i16, v16i8>;
2873     def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2874                                      !cast<PatFrag>(opnode # "_8H"),
2875                                      v4i32, v8i16>;
2876     def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2877                                      !cast<PatFrag>(opnode # "_4S"),
2878                                      v2i64, v4i32>;
2879   }
2880 }
2881
2882 defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
2883                                          "NI_smull_hi", 1>;
2884 defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
2885                                          "NI_umull_hi", 1>;
2886
2887 // Long pattern with 3 operands
2888 class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2889                      string asmop, string ResS, string OpS,
2890                      SDPatternOperator opnode,
2891                      ValueType ResTy, ValueType OpTy>
2892   : NeonI_3VDiff<q, u, size, opcode,
2893                  (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
2894                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2895                  [(set (ResTy VPR128:$Rd),
2896                     (ResTy (opnode
2897                       (ResTy VPR128:$src),
2898                       (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
2899                NoItinerary> {
2900   let Constraints = "$src = $Rd";
2901 }
2902
2903 multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode, string asmop,
2904                              SDPatternOperator opnode> {
2905   def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2906                              opnode, v8i16, v8i8>;
2907   def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2908                              opnode, v4i32, v4i16>;
2909   def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2910                              opnode, v2i64, v2i32>;
2911 }
2912
2913 def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2914                          (add node:$Rd,
2915                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
2916
2917 def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2918                          (add node:$Rd,
2919                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
2920
2921 def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2922                          (sub node:$Rd,
2923                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
2924
2925 def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2926                          (sub node:$Rd,
2927                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
2928
2929 defm SMLALvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
2930 defm UMLALvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
2931
2932 defm SMLSLvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
2933 defm UMLSLvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
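// Illustrative expansion (not from the original source): for the 4s/4h case, a
// DAG such as
//   (add VPR128:$acc, (int_arm_neon_vmulls VPR64:$a, VPR64:$b))
// matches Neon_smlal and selects to "smlal $acc.4s, $a.4h, $b.4h" (likewise
// for umlal/smlsl/umlsl), so no dedicated accumulate intrinsic is needed.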
2934
2935 class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
2936                            string asmop, string ResS, string OpS,
2937                            SDPatternOperator subop, SDPatternOperator opnode,
2938                            RegisterOperand OpVPR,
2939                            ValueType ResTy, ValueType OpTy>
2940   : NeonI_3VDiff<q, u, size, opcode,
2941                (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
2942                asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2943                [(set (ResTy VPR128:$Rd),
2944                   (ResTy (subop
2945                     (ResTy VPR128:$src),
2946                     (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
2947                NoItinerary> {
2948   let Constraints = "$src = $Rd";
2949 }
2950
2951 multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode, string asmop, 
2952                                    SDPatternOperator subop, string opnode> {
2953   def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2954                                     subop, !cast<PatFrag>(opnode # "_16B"),
2955                                     VPR128, v8i16, v16i8>;
2956   def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2957                                    subop, !cast<PatFrag>(opnode # "_8H"), 
2958                                    VPR128, v4i32, v8i16>;
2959   def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2960                                    subop, !cast<PatFrag>(opnode # "_4S"),
2961                                    VPR128, v2i64, v4i32>;
2962 }
2963
2964 defm SMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
2965                                           add, "NI_smull_hi">;
2966 defm UMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
2967                                           add, "NI_umull_hi">;
2968
2969 defm SMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
2970                                           sub, "NI_smull_hi">;
2971 defm UMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
2972                                           sub, "NI_umull_hi">;
2973
2974 multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode, string asmop,
2975                                     SDPatternOperator opnode> {
2976   def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2977                                    opnode, int_arm_neon_vqdmull,
2978                                    VPR64, v4i32, v4i16>;
2979   def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2980                                    opnode, int_arm_neon_vqdmull,
2981                                    VPR64, v2i64, v2i32>;
2982 }
2983
2984 defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
2985                                            int_arm_neon_vqadds>;
2986 defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
2987                                            int_arm_neon_vqsubs>;
2988
2989 multiclass NeonI_3VDL_v2<bit u, bits<4> opcode, string asmop,
2990                          SDPatternOperator opnode, bit Commutable = 0> {
2991   let isCommutable = Commutable in {
2992     def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2993                               opnode, VPR128, VPR64, v4i32, v4i16>;
2994     def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2995                               opnode, VPR128, VPR64, v2i64, v2i32>;
2996   }
2997 }
2998
2999 defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
3000                                 int_arm_neon_vqdmull, 1>;
3001
3002 multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode, string asmop, 
3003                                    string opnode, bit Commutable = 0> {
3004   let isCommutable = Commutable in {
3005     def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3006                                      !cast<PatFrag>(opnode # "_8H"),
3007                                      v4i32, v8i16>;
3008     def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3009                                      !cast<PatFrag>(opnode # "_4S"),
3010                                      v2i64, v4i32>;
3011   }
3012 }
3013
3014 defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2", 
3015                                            "NI_qdmull_hi", 1>;
3016
3017 multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode, string asmop, 
3018                                      SDPatternOperator opnode> {
3019   def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3020                                    opnode, NI_qdmull_hi_8H,
3021                                    VPR128, v4i32, v8i16>;
3022   def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3023                                    opnode, NI_qdmull_hi_4S,
3024                                    VPR128, v2i64, v4i32>;
3025 }
3026
3027 defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
3028                                              int_arm_neon_vqadds>;
3029 defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
3030                                              int_arm_neon_vqsubs>;
3031
3032 multiclass NeonI_3VDL_v3<bit u, bits<4> opcode, string asmop,
3033                          SDPatternOperator opnode, bit Commutable = 0> {
3034   let isCommutable = Commutable in {
3035     def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3036                               opnode, VPR128, VPR64, v8i16, v8i8>;
3037     
3038     def _1q1d : NeonI_3VDiff<0b0, u, 0b11, opcode,
3039                              (outs VPR128:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
3040                              asmop # "\t$Rd.1q, $Rn.1d, $Rm.1d",
3041                              [], NoItinerary>;
3042   }
3043 }
3044
3045 defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp, 1>;
3046
3047 multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode, string asmop, 
3048                                    string opnode, bit Commutable = 0> {
3049   let isCommutable = Commutable in {
3050     def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3051                                       !cast<PatFrag>(opnode # "_16B"),
3052                                       v8i16, v16i8>;
3053     
3054     def _1q2d : NeonI_3VDiff<0b1, u, 0b11, opcode,
3055                              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
3056                              asmop # "\t$Rd.1q, $Rn.2d, $Rm.2d",
3057                              [], NoItinerary>;
3058   }
3059 }
3060
3061 defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2", "NI_pmull_hi",
3062                                          1>;
3063
3064 // End of implementation for instruction class (3V Diff)
3065
3066 // The following are the vector load/store multiple N-element structure
3067 // instructions (class SIMD lselem).
3068
3069 // ld1:         load multiple 1-element structure to 1/2/3/4 registers.
3070 // ld2/ld3/ld4: load multiple N-element structure to N registers (N = 2, 3, 4).
3071 //              The structure consists of a sequence of sets of N values.
3072 //              The first element of the structure is placed in the first lane
3073 //              of the first vector, the second element in the first lane of
3074 //              the second vector, and so on.
3075 // E.g. LD1_3V_2S will load 32-bit elements {A, B, C, D, E, F} sequentially
3076 // into the list of three 64-bit vectors {BA, DC, FE}.
3077 // E.g. LD3_2S will load 32-bit elements {A, B, C, D, E, F} into the list of
3078 // three 64-bit vectors {DA, EB, FC}.
3079 // Store instructions mirror the loads, storing N registers out to memory.
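// In ACLE terms (illustrative, not from the original source), vld3_s32(p)
// de-interleaves six ints {A..F} into an int32x2x3_t whose fields are
// {A,D}, {B,E} and {C,F}, matching the LD3_2S layout described above.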
3080
3081
3082 class NeonI_LDVList<bit q, bits<4> opcode, bits<2> size,
3083                     RegisterOperand VecList, string asmop>
3084   : NeonI_LdStMult<q, 1, opcode, size,
3085                  (outs VecList:$Rt), (ins GPR64xsp:$Rn),
3086                  asmop # "\t$Rt, [$Rn]",
3087                  [],
3088                  NoItinerary> {
3089   let mayLoad = 1;
3090   let neverHasSideEffects = 1;
3091 }
3092
3093 multiclass LDVList_BHSD<bits<4> opcode, string List, string asmop> {
3094   def _8B : NeonI_LDVList<0, opcode, 0b00,
3095                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3096
3097   def _4H : NeonI_LDVList<0, opcode, 0b01,
3098                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3099
3100   def _2S : NeonI_LDVList<0, opcode, 0b10,
3101                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3102
3103   def _16B : NeonI_LDVList<1, opcode, 0b00,
3104                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3105
3106   def _8H : NeonI_LDVList<1, opcode, 0b01,
3107                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3108
3109   def _4S : NeonI_LDVList<1, opcode, 0b10,
3110                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3111
3112   def _2D : NeonI_LDVList<1, opcode, 0b11,
3113                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3114 }
3115
3116 // Load multiple N-element structure to N consecutive registers (N = 1,2,3,4)
3117 defm LD1 : LDVList_BHSD<0b0111, "VOne", "ld1">;
3118 def LD1_1D : NeonI_LDVList<0, 0b0111, 0b11, VOne1D_operand, "ld1">;
3119
3120 defm LD2 : LDVList_BHSD<0b1000, "VPair", "ld2">;
3121
3122 defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">;
3123
3124 defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
3125
3126 // Load multiple 1-element structure to N consecutive registers (N = 2,3,4)
3127 defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
3128 def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
3129
3130 defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
3131 def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
3132
3133 defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
3134 def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
3135
3136 class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
3137                     RegisterOperand VecList, string asmop>
3138   : NeonI_LdStMult<q, 0, opcode, size,
3139                  (outs), (ins GPR64xsp:$Rn, VecList:$Rt), 
3140                  asmop # "\t$Rt, [$Rn]",
3141                  [], 
3142                  NoItinerary> {
3143   let mayStore = 1;
3144   let neverHasSideEffects = 1;
3145 }
3146
3147 multiclass STVList_BHSD<bits<4> opcode, string List, string asmop> {
3148   def _8B : NeonI_STVList<0, opcode, 0b00,
3149                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3150
3151   def _4H : NeonI_STVList<0, opcode, 0b01,
3152                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3153
3154   def _2S : NeonI_STVList<0, opcode, 0b10,
3155                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3156
3157   def _16B : NeonI_STVList<1, opcode, 0b00,
3158                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3159
3160   def _8H : NeonI_STVList<1, opcode, 0b01,
3161                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3162
3163   def _4S : NeonI_STVList<1, opcode, 0b10,
3164                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3165
3166   def _2D : NeonI_STVList<1, opcode, 0b11,
3167                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3168 }
3169
3170 // Store multiple N-element structures from N registers (N = 1,2,3,4)
3171 defm ST1 : STVList_BHSD<0b0111, "VOne", "st1">;
3172 def ST1_1D : NeonI_STVList<0, 0b0111, 0b11, VOne1D_operand, "st1">;
3173
3174 defm ST2 : STVList_BHSD<0b1000, "VPair", "st2">;
3175
3176 defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">;
3177
3178 defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
3179
3180 // Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
3181 defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
3182 def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
3183
3184 defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
3185 def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
3186
3187 defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
3188 def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
3189
3190 def : Pat<(v2f64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
3191 def : Pat<(v2i64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
3192
3193 def : Pat<(v4f32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
3194 def : Pat<(v4i32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
3195
3196 def : Pat<(v8i16 (load GPR64xsp:$addr)), (LD1_8H GPR64xsp:$addr)>;
3197 def : Pat<(v16i8 (load GPR64xsp:$addr)), (LD1_16B GPR64xsp:$addr)>;
3198
3199 def : Pat<(v1f64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
3200 def : Pat<(v1i64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
3201
3202 def : Pat<(v2f32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
3203 def : Pat<(v2i32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
3204
3205 def : Pat<(v4i16 (load GPR64xsp:$addr)), (LD1_4H GPR64xsp:$addr)>;
3206 def : Pat<(v8i8 (load GPR64xsp:$addr)), (LD1_8B GPR64xsp:$addr)>;
3207
3208 def : Pat<(store (v2i64 VPR128:$value), GPR64xsp:$addr),
3209           (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
3210 def : Pat<(store (v2f64 VPR128:$value), GPR64xsp:$addr),
3211           (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
3212
3213 def : Pat<(store (v4i32 VPR128:$value), GPR64xsp:$addr),
3214           (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
3215 def : Pat<(store (v4f32 VPR128:$value), GPR64xsp:$addr),
3216           (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
3217
3218 def : Pat<(store (v8i16 VPR128:$value), GPR64xsp:$addr),
3219           (ST1_8H GPR64xsp:$addr, VPR128:$value)>;
3220 def : Pat<(store (v16i8 VPR128:$value), GPR64xsp:$addr),
3221           (ST1_16B GPR64xsp:$addr, VPR128:$value)>;
3222
3223 def : Pat<(store (v1i64 VPR64:$value), GPR64xsp:$addr),
3224           (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
3225 def : Pat<(store (v1f64 VPR64:$value), GPR64xsp:$addr),
3226           (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
3227
3228 def : Pat<(store (v2i32 VPR64:$value), GPR64xsp:$addr),
3229           (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
3230 def : Pat<(store (v2f32 VPR64:$value), GPR64xsp:$addr),
3231           (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
3232
3233 def : Pat<(store (v4i16 VPR64:$value), GPR64xsp:$addr),
3234           (ST1_4H GPR64xsp:$addr, VPR64:$value)>;
3235 def : Pat<(store (v8i8 VPR64:$value), GPR64xsp:$addr),
3236           (ST1_8B GPR64xsp:$addr, VPR64:$value)>;
3237
3238 // End of vector load/store multiple N-element structure (class SIMD lselem)
3239
3240 // The following are the post-index vector load/store multiple N-element
3241 // structure instructions (class SIMD lselem-post).
3242 def exact1_asmoperand : AsmOperandClass {
3243   let Name = "Exact1";
3244   let PredicateMethod = "isExactImm<1>";
3245   let RenderMethod = "addImmOperands";
3246 }
3247 def uimm_exact1 : Operand<i32>, ImmLeaf<i32, [{return Imm == 1;}]> {
3248   let ParserMatchClass = exact1_asmoperand;
3249 }
3250
3251 def exact2_asmoperand : AsmOperandClass {
3252   let Name = "Exact2";
3253   let PredicateMethod = "isExactImm<2>";
3254   let RenderMethod = "addImmOperands";
3255 }
3256 def uimm_exact2 : Operand<i32>, ImmLeaf<i32, [{return Imm == 2;}]> {
3257   let ParserMatchClass = exact2_asmoperand;
3258 }
3259
3260 def exact3_asmoperand : AsmOperandClass {
3261   let Name = "Exact3";
3262   let PredicateMethod = "isExactImm<3>";
3263   let RenderMethod = "addImmOperands";
3264 }
3265 def uimm_exact3 : Operand<i32>, ImmLeaf<i32, [{return Imm == 3;}]> {
3266   let ParserMatchClass = exact3_asmoperand;
3267 }
3268
3269 def exact4_asmoperand : AsmOperandClass {
3270   let Name = "Exact4";
3271   let PredicateMethod = "isExactImm<4>";
3272   let RenderMethod = "addImmOperands";
3273 }
3274 def uimm_exact4 : Operand<i32>, ImmLeaf<i32, [{return Imm == 4;}]> {
3275   let ParserMatchClass = exact4_asmoperand;
3276 }
3277
3278 def exact6_asmoperand : AsmOperandClass {
3279   let Name = "Exact6";
3280   let PredicateMethod = "isExactImm<6>";
3281   let RenderMethod = "addImmOperands";
3282 }
3283 def uimm_exact6 : Operand<i32>, ImmLeaf<i32, [{return Imm == 6;}]> {
3284   let ParserMatchClass = exact6_asmoperand;
3285 }
3286
3287 def exact8_asmoperand : AsmOperandClass {
3288   let Name = "Exact8";
3289   let PredicateMethod = "isExactImm<8>";
3290   let RenderMethod = "addImmOperands";
3291 }
3292 def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
3293   let ParserMatchClass = exact8_asmoperand;
3294 }
3295
3296 def exact12_asmoperand : AsmOperandClass {
3297   let Name = "Exact12";
3298   let PredicateMethod = "isExactImm<12>";
3299   let RenderMethod = "addImmOperands";
3300 }
3301 def uimm_exact12 : Operand<i32>, ImmLeaf<i32, [{return Imm == 12;}]> {
3302   let ParserMatchClass = exact12_asmoperand;
3303 }
3304
3305 def exact16_asmoperand : AsmOperandClass {
3306   let Name = "Exact16";
3307   let PredicateMethod = "isExactImm<16>";
3308   let RenderMethod = "addImmOperands";
3309 }
3310 def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
3311   let ParserMatchClass = exact16_asmoperand;
3312 }
3313
3314 def exact24_asmoperand : AsmOperandClass {
3315   let Name = "Exact24";
3316   let PredicateMethod = "isExactImm<24>";
3317   let RenderMethod = "addImmOperands";
3318 }
3319 def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
3320   let ParserMatchClass = exact24_asmoperand;
3321 }
3322
3323 def exact32_asmoperand : AsmOperandClass {
3324   let Name = "Exact32";
3325   let PredicateMethod = "isExactImm<32>";
3326   let RenderMethod = "addImmOperands";
3327 }
3328 def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
3329   let ParserMatchClass = exact32_asmoperand;
3330 }
3331
3332 def exact48_asmoperand : AsmOperandClass {
3333   let Name = "Exact48";
3334   let PredicateMethod = "isExactImm<48>";
3335   let RenderMethod = "addImmOperands";
3336 }
3337 def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
3338   let ParserMatchClass = exact48_asmoperand;
3339 }
3340
3341 def exact64_asmoperand : AsmOperandClass {
3342   let Name = "Exact64";
3343   let PredicateMethod = "isExactImm<64>";
3344   let RenderMethod = "addImmOperands";
3345 }
3346 def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
3347   let ParserMatchClass = exact64_asmoperand;
3348 }
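// Illustrative note (not from the original source): post-index write-back with
// an immediate is only valid when the immediate equals the total number of
// bytes transferred, so each form gets an operand restricted to that single
// value, e.g.
//   ld1 {v0.16b}, [x0], #16               // one 128-bit register  -> uimm_exact16
//   ld2 {v0.8b, v1.8b}, [x0], #16         // two 64-bit registers  -> uimm_exact16
//   ld3 {v0.8b, v1.8b, v2.8b}, [x0], #24  // three 64-bit registers -> uimm_exact24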
3349
3350 multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
3351                            RegisterOperand VecList, Operand ImmTy,
3352                            string asmop> {
3353   let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1, 
3354       DecoderMethod = "DecodeVLDSTPostInstruction" in {
3355     def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
3356                      (outs VecList:$Rt, GPR64xsp:$wb),
3357                      (ins GPR64xsp:$Rn, ImmTy:$amt), 
3358                      asmop # "\t$Rt, [$Rn], $amt",
3359                      [],
3360                      NoItinerary> {
3361       let Rm = 0b11111;
3362     }
3363
3364     def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
3365                         (outs VecList:$Rt, GPR64xsp:$wb),
3366                         (ins GPR64xsp:$Rn, GPR64noxzr:$Rm), 
3367                         asmop # "\t$Rt, [$Rn], $Rm",
3368                         [],
3369                         NoItinerary>;
3370   }
3371 }
3372
3373 multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
3374     Operand ImmTy2, string asmop> {
3375   defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
3376                               !cast<RegisterOperand>(List # "8B_operand"),
3377                               ImmTy, asmop>;
3378
3379   defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
3380                               !cast<RegisterOperand>(List # "4H_operand"),
3381                               ImmTy, asmop>;
3382
3383   defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
3384                               !cast<RegisterOperand>(List # "2S_operand"),
3385                               ImmTy, asmop>;
3386
3387   defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
3388                                !cast<RegisterOperand>(List # "16B_operand"),
3389                                ImmTy2, asmop>;
3390
3391   defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
3392                               !cast<RegisterOperand>(List # "8H_operand"),
3393                               ImmTy2, asmop>;
3394
3395   defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
3396                               !cast<RegisterOperand>(List # "4S_operand"),
3397                               ImmTy2, asmop>;
3398
3399   defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
3400                               !cast<RegisterOperand>(List # "2D_operand"),
3401                               ImmTy2, asmop>;
3402 }
3403
3404 // Post-index load multiple N-element structures into N registers (N = 1,2,3,4)
3405 defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
3406 defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
3407                                  "ld1">;
3408
3409 defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "ld2">;
3410
3411 defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
3412                              "ld3">;
3413
3414 defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">;
3415
3416 // Post-index load multiple 1-element structures into N consecutive registers
3417 // (N = 2,3,4)
3418 defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
3419                                "ld1">;
3420 defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
3421                                    uimm_exact16, "ld1">;
3422
3423 defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
3424                                "ld1">;
3425 defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
3426                                    uimm_exact24, "ld1">;
3427
3428 defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
3429                                 "ld1">;
3430 defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
3431                                    uimm_exact32, "ld1">;
3432
3433 multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
3434                             RegisterOperand VecList, Operand ImmTy,
3435                             string asmop> {
3436   let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
3437       DecoderMethod = "DecodeVLDSTPostInstruction" in {
3438     def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
3439                      (outs GPR64xsp:$wb),
3440                      (ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
3441                      asmop # "\t$Rt, [$Rn], $amt",
3442                      [],
3443                      NoItinerary> {
3444       let Rm = 0b11111;
3445     }
3446
3447     def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
3448                       (outs GPR64xsp:$wb),
3449                       (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt), 
3450                       asmop # "\t$Rt, [$Rn], $Rm",
3451                       [],
3452                       NoItinerary>;
3453   }
3454 }
3455
3456 multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
3457                            Operand ImmTy2, string asmop> {
3458   defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
3459                  !cast<RegisterOperand>(List # "8B_operand"), ImmTy, asmop>;
3460
3461   defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
3462                               !cast<RegisterOperand>(List # "4H_operand"),
3463                               ImmTy, asmop>;
3464
3465   defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
3466                               !cast<RegisterOperand>(List # "2S_operand"),
3467                               ImmTy, asmop>;
3468
3469   defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
3470                                !cast<RegisterOperand>(List # "16B_operand"),
3471                                ImmTy2, asmop>;
3472
3473   defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
3474                               !cast<RegisterOperand>(List # "8H_operand"),
3475                               ImmTy2, asmop>;
3476
3477   defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
3478                               !cast<RegisterOperand>(List # "4S_operand"),
3479                               ImmTy2, asmop>;
3480
3481   defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
3482                               !cast<RegisterOperand>(List # "2D_operand"),
3483                               ImmTy2, asmop>;
3484 }
3485
3486 // Post-index store multiple N-element structures from N registers (N = 1,2,3,4)
3487 defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
3488 defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
3489                                  "st1">;
3490
3491 defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "st2">;
3492
3493 defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
3494                              "st3">;
3495
3496 defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">;
3497
3498 // Post-index store multiple 1-element structures from N consecutive registers
3499 // (N = 2,3,4)
3500 defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
3501                                "st1">;
3502 defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
3503                                    uimm_exact16, "st1">;
3504
3505 defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
3506                                "st1">;
3507 defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
3508                                    uimm_exact24, "st1">;
3509
3510 defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
3511                                "st1">;
3512 defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
3513                                    uimm_exact32, "st1">;
3514
3515 // End of post-index vector load/store multiple N-element structure
3516 // (class SIMD lselem-post)
3517
3518 // The following are the vector load/store single N-element structure
3519 // instructions (class SIMD lsone).
3520 def neon_uimm0_bare : Operand<i64>,
3521                         ImmLeaf<i64, [{return Imm == 0;}]> {
3522   let ParserMatchClass = neon_uimm0_asmoperand;
3523   let PrintMethod = "printUImmBareOperand";
3524 }
3525
3526 def neon_uimm1_bare : Operand<i64>,
3527                         ImmLeaf<i64, [{return Imm < 2;}]> {
3528   let ParserMatchClass = neon_uimm1_asmoperand;
3529   let PrintMethod = "printUImmBareOperand";
3530 }
3531
3532 def neon_uimm2_bare : Operand<i64>,
3533                         ImmLeaf<i64, [{return Imm < 4;}]> {
3534   let ParserMatchClass = neon_uimm2_asmoperand;
3535   let PrintMethod = "printUImmBareOperand";
3536 }
3537
3538 def neon_uimm3_bare : Operand<i64>,
3539                         ImmLeaf<i64, [{return Imm < 8;}]> {
3540   let ParserMatchClass = uimm3_asmoperand;
3541   let PrintMethod = "printUImmBareOperand";
3542 }
3543
3544 def neon_uimm4_bare : Operand<i64>,
3545                         ImmLeaf<i64, [{return Imm < 16;}]> {
3546   let ParserMatchClass = uimm4_asmoperand;
3547   let PrintMethod = "printUImmBareOperand";
3548 }
3549
3550 class NeonI_LDN_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
3551                     RegisterOperand VecList, string asmop>
3552     : NeonI_LdOne_Dup<q, r, opcode, size,
3553                       (outs VecList:$Rt), (ins GPR64xsp:$Rn),
3554                       asmop # "\t$Rt, [$Rn]",
3555                       [],
3556                       NoItinerary> {
3557   let mayLoad = 1;
3558   let neverHasSideEffects = 1;
3559 }
3560
3561 multiclass LDN_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop> {
3562   def _8B : NeonI_LDN_Dup<0, r, opcode, 0b00,
3563                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3564
3565   def _4H : NeonI_LDN_Dup<0, r, opcode, 0b01,
3566                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3567
3568   def _2S : NeonI_LDN_Dup<0, r, opcode, 0b10,
3569                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3570
3571   def _1D : NeonI_LDN_Dup<0, r, opcode, 0b11,
3572                           !cast<RegisterOperand>(List # "1D_operand"), asmop>;
3573
3574   def _16B : NeonI_LDN_Dup<1, r, opcode, 0b00,
3575                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3576
3577   def _8H : NeonI_LDN_Dup<1, r, opcode, 0b01,
3578                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3579
3580   def _4S : NeonI_LDN_Dup<1, r, opcode, 0b10,
3581                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3582
3583   def _2D : NeonI_LDN_Dup<1, r, opcode, 0b11,
3584                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3585 }
3586
3587 // Load single 1-element structure to all lanes of 1 register
3588 defm LD1R : LDN_Dup_BHSD<0b0, 0b110, "VOne", "ld1r">;
3589
3590 // Load single N-element structure to all lanes of N consecutive 
3591 // registers (N = 2,3,4)
3592 defm LD2R : LDN_Dup_BHSD<0b1, 0b110, "VPair", "ld2r">;
3593 defm LD3R : LDN_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r">;
3594 defm LD4R : LDN_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r">;
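// Illustrative assembler forms for the load-and-replicate definitions above
// (examples only):
//   ld1r { v0.8b }, [x0]
//   ld2r { v0.4s, v1.4s }, [x0]
//   ld4r { v0.2d, v1.2d, v2.2d, v3.2d }, [x1]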
3595
3596
3597 class LD1R_pattern <ValueType VTy, ValueType DTy, PatFrag LoadOp,
3598                     Instruction INST>
3599     : Pat<(VTy (Neon_vdup (DTy (LoadOp GPR64xsp:$Rn)))),
3600           (VTy (INST GPR64xsp:$Rn))>;
3601
3602 // Match all LD1R instructions
3603 def : LD1R_pattern<v8i8, i32, extloadi8, LD1R_8B>;
3604
3605 def : LD1R_pattern<v16i8, i32, extloadi8, LD1R_16B>;
3606
3607 def : LD1R_pattern<v4i16, i32, extloadi16, LD1R_4H>;
3608
3609 def : LD1R_pattern<v8i16, i32, extloadi16, LD1R_8H>;
3610
3611 def : LD1R_pattern<v2i32, i32, load, LD1R_2S>;
3612 def : LD1R_pattern<v2f32, f32, load, LD1R_2S>;
3613
3614 def : LD1R_pattern<v4i32, i32, load, LD1R_4S>;
3615 def : LD1R_pattern<v4f32, f32, load, LD1R_4S>;
3616
3617 def : LD1R_pattern<v1i64, i64, load, LD1R_1D>;
3618 def : LD1R_pattern<v1f64, f64, load, LD1R_1D>;
3619
3620 def : LD1R_pattern<v2i64, i64, load, LD1R_2D>;
3621 def : LD1R_pattern<v2f64, f64, load, LD1R_2D>;
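// For instance, the pattern (v4f32 (Neon_vdup (f32 (load GPR64xsp:$Rn)))) above
// is selected to LD1R_4S, which assembles as "ld1r { v0.4s }, [x0]" (example
// registers only).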
3622
3623
3624 multiclass VectorList_Bare_BHSD<string PREFIX, int Count,
3625                                 RegisterClass RegList> {
3626   defm B : VectorList_operands<PREFIX, "B", Count, RegList>;
3627   defm H : VectorList_operands<PREFIX, "H", Count, RegList>;
3628   defm S : VectorList_operands<PREFIX, "S", Count, RegList>;
3629   defm D : VectorList_operands<PREFIX, "D", Count, RegList>;
3630 }
3631
3632 // Special vector list operands of 128-bit vectors with bare layout,
3633 // i.e. only the lane type ".b", ".h", ".s" or ".d" is printed.
3634 defm VOne : VectorList_Bare_BHSD<"VOne", 1, FPR128>;
3635 defm VPair : VectorList_Bare_BHSD<"VPair", 2, QPair>;
3636 defm VTriple : VectorList_Bare_BHSD<"VTriple", 3, QTriple>;
3637 defm VQuad : VectorList_Bare_BHSD<"VQuad", 4, QQuad>;
3638
3639 class NeonI_LDN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
3640                      Operand ImmOp, string asmop>
3641     : NeonI_LdStOne_Lane<1, r, op2_1, op0,
3642                          (outs VList:$Rt),
3643                          (ins GPR64xsp:$Rn, VList:$src, ImmOp:$lane),
3644                          asmop # "\t$Rt[$lane], [$Rn]",
3645                          [],
3646                          NoItinerary> {
3647   let mayLoad = 1;
3648   let neverHasSideEffects = 1;
3649   let hasExtraDefRegAllocReq = 1;
3650   let Constraints = "$src = $Rt";
3651 }
3652
3653 multiclass LDN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
3654   def _B : NeonI_LDN_Lane<r, 0b00, op0,
3655                           !cast<RegisterOperand>(List # "B_operand"),
3656                           neon_uimm4_bare, asmop> {
3657     let Inst{12-10} = lane{2-0};
3658     let Inst{30} = lane{3};
3659   }
3660
3661   def _H : NeonI_LDN_Lane<r, 0b01, op0,
3662                           !cast<RegisterOperand>(List # "H_operand"),
3663                           neon_uimm3_bare, asmop> {
3664     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
3665     let Inst{30} = lane{2};
3666   }
3667
3668   def _S : NeonI_LDN_Lane<r, 0b10, op0,
3669                           !cast<RegisterOperand>(List # "S_operand"),
3670                           neon_uimm2_bare, asmop> {
3671     let Inst{12-10} = {lane{0}, 0b0, 0b0};
3672     let Inst{30} = lane{1};
3673   }
3674   
3675   def _D : NeonI_LDN_Lane<r, 0b10, op0,
3676                           !cast<RegisterOperand>(List # "D_operand"),
3677                           neon_uimm1_bare, asmop> {
3678     let Inst{12-10} = 0b001;
3679     let Inst{30} = lane{0};
3680   }
3681 }
3682
3683 // Load single 1-element structure to one lane of 1 register.
3684 defm LD1LN : LDN_Lane_BHSD<0b0, 0b0, "VOne", "ld1">;
3685
3686 // Load single N-element structure to one lane of N consecutive registers
3687 // (N = 2,3,4)
3688 defm LD2LN : LDN_Lane_BHSD<0b1, 0b0, "VPair", "ld2">;
3689 defm LD3LN : LDN_Lane_BHSD<0b0, 0b1, "VTriple", "ld3">;
3690 defm LD4LN : LDN_Lane_BHSD<0b1, 0b1, "VQuad", "ld4">;
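// Illustrative assembler forms for the single-lane load definitions above
// (examples only):
//   ld1 { v0.b }[15], [x0]
//   ld3 { v0.h, v1.h, v2.h }[5], [x0]
//   ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x1]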
3691
3692 multiclass LD1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
3693                           Operand ImmOp, Operand ImmOp2, PatFrag LoadOp,
3694                           Instruction INST> {
3695   def : Pat<(VTy (vector_insert (VTy VPR64:$src),
3696                      (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp:$lane))),
3697             (VTy (EXTRACT_SUBREG 
3698                      (INST GPR64xsp:$Rn, 
3699                            (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
3700                            ImmOp:$lane),
3701                      sub_64))>;
3702
3703   def : Pat<(VTy2 (vector_insert (VTy2 VPR128:$src),
3704                       (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp2:$lane))),
3705             (VTy2 (INST GPR64xsp:$Rn, VPR128:$src, ImmOp2:$lane))>;
3706 }
3707
3708 // Match all LD1LN instructions
3709 defm : LD1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
3710                       extloadi8, LD1LN_B>;
3711
3712 defm : LD1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
3713                       extloadi16, LD1LN_H>;
3714
3715 defm : LD1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
3716                       load, LD1LN_S>;
3717 defm : LD1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
3718                       load, LD1LN_S>;
3719
3720 defm : LD1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
3721                       load, LD1LN_D>;
3722 defm : LD1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
3723                       load, LD1LN_D>;
3724
3725 class NeonI_STN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
3726                      Operand ImmOp, string asmop>
3727     : NeonI_LdStOne_Lane<0, r, op2_1, op0,
3728                          (outs), (ins GPR64xsp:$Rn, VList:$Rt, ImmOp:$lane),
3729                          asmop # "\t$Rt[$lane], [$Rn]",
3730                          [],
3731                          NoItinerary> {
3732   let mayStore = 1;
3733   let neverHasSideEffects = 1;
3734   let hasExtraDefRegAllocReq = 1;
3735 }
3736
3737 multiclass STN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
3738   def _B : NeonI_STN_Lane<r, 0b00, op0,
3739                           !cast<RegisterOperand>(List # "B_operand"),
3740                           neon_uimm4_bare, asmop> {
3741     let Inst{12-10} = lane{2-0};
3742     let Inst{30} = lane{3};
3743   }
3744
3745   def _H : NeonI_STN_Lane<r, 0b01, op0,
3746                           !cast<RegisterOperand>(List # "H_operand"),
3747                           neon_uimm3_bare, asmop> {
3748     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
3749     let Inst{30} = lane{2};
3750   }
3751
3752   def _S : NeonI_STN_Lane<r, 0b10, op0,
3753                           !cast<RegisterOperand>(List # "S_operand"),
3754                            neon_uimm2_bare, asmop> {
3755     let Inst{12-10} = {lane{0}, 0b0, 0b0};
3756     let Inst{30} = lane{1};
3757   }
3758   
3759   def _D : NeonI_STN_Lane<r, 0b10, op0,
3760                           !cast<RegisterOperand>(List # "D_operand"),
3761                           neon_uimm1_bare, asmop>{
3762     let Inst{12-10} = 0b001;
3763     let Inst{30} = lane{0};
3764   }
3765 }
3766
3767 // Store single 1-element structure from one lane of 1 register.
3768 defm ST1LN : STN_Lane_BHSD<0b0, 0b0, "VOne", "st1">;
3769
3770 // Store single N-element structure from one lane of N consecutive registers
3771 // (N = 2,3,4)
3772 defm ST2LN : STN_Lane_BHSD<0b1, 0b0, "VPair", "st2">;
3773 defm ST3LN : STN_Lane_BHSD<0b0, 0b1, "VTriple", "st3">;
3774 defm ST4LN : STN_Lane_BHSD<0b1, 0b1, "VQuad", "st4">;
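// Illustrative assembler forms for the single-lane store definitions above
// (examples only):
//   st1 { v0.s }[3], [x0]
//   st2 { v0.b, v1.b }[9], [x0]
//   st4 { v0.h, v1.h, v2.h, v3.h }[7], [x1]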
3775
3776 multiclass ST1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
3777                           Operand ImmOp, Operand ImmOp2, PatFrag StoreOp,
3778                           Instruction INST> {
3779   def : Pat<(StoreOp (DTy (vector_extract (VTy VPR64:$Rt), ImmOp:$lane)),
3780                      GPR64xsp:$Rn),
3781             (INST GPR64xsp:$Rn,
3782                   (SUBREG_TO_REG (i64 0), VPR64:$Rt, sub_64),
3783                   ImmOp:$lane)>;
3784
3785   def : Pat<(StoreOp (DTy (vector_extract (VTy2 VPR128:$Rt), ImmOp2:$lane)),
3786                      GPR64xsp:$Rn),
3787             (INST GPR64xsp:$Rn, VPR128:$Rt, ImmOp2:$lane)>;
3788 }
3789
3790 // Match all ST1LN instructions
3791 defm : ST1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
3792                       truncstorei8, ST1LN_B>;
3793
3794 defm : ST1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
3795                       truncstorei16, ST1LN_H>;
3796
3797 defm : ST1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
3798                       store, ST1LN_S>;
3799 defm : ST1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
3800                       store, ST1LN_S>;
3801
3802 defm : ST1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
3803                       store, ST1LN_D>;
3804 defm : ST1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
3805                       store, ST1LN_D>;
3806
3807 // End of vector load/store single N-element structure (class SIMD lsone).
3808
3809
3810 // The following are post-index load/store single N-element instructions
3811 // (class SIMD lsone-post)
3812
3813 multiclass NeonI_LDN_WB_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
3814                             RegisterOperand VecList, Operand ImmTy,
3815                             string asmop> {
3816   let mayLoad = 1, neverHasSideEffects = 1, Constraints = "$wb = $Rn",
3817   DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
3818     def _fixed : NeonI_LdOne_Dup_Post<q, r, opcode, size,
3819                       (outs VecList:$Rt, GPR64xsp:$wb),
3820                       (ins GPR64xsp:$Rn, ImmTy:$amt),
3821                       asmop # "\t$Rt, [$Rn], $amt",
3822                       [],
3823                       NoItinerary> {
3824                         let Rm = 0b11111;
3825                       }
3826
3827     def _register : NeonI_LdOne_Dup_Post<q, r, opcode, size,
3828                       (outs VecList:$Rt, GPR64xsp:$wb),
3829                       (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
3830                       asmop # "\t$Rt, [$Rn], $Rm",
3831                       [],
3832                       NoItinerary>;
3833   }
3834 }
3835
3836 multiclass LDWB_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop,
3837                          Operand uimm_b, Operand uimm_h,
3838                          Operand uimm_s, Operand uimm_d> {
3839   defm _8B : NeonI_LDN_WB_Dup<0, r, opcode, 0b00,
3840                               !cast<RegisterOperand>(List # "8B_operand"),
3841                               uimm_b, asmop>;
3842
3843   defm _4H : NeonI_LDN_WB_Dup<0, r, opcode, 0b01,
3844                               !cast<RegisterOperand>(List # "4H_operand"),
3845                               uimm_h, asmop>;
3846
3847   defm _2S : NeonI_LDN_WB_Dup<0, r, opcode, 0b10,
3848                               !cast<RegisterOperand>(List # "2S_operand"),
3849                               uimm_s, asmop>;
3850
3851   defm _1D : NeonI_LDN_WB_Dup<0, r, opcode, 0b11,
3852                               !cast<RegisterOperand>(List # "1D_operand"),
3853                               uimm_d, asmop>;
3854
3855   defm _16B : NeonI_LDN_WB_Dup<1, r, opcode, 0b00,
3856                                !cast<RegisterOperand>(List # "16B_operand"),
3857                                uimm_b, asmop>;
3858
3859   defm _8H : NeonI_LDN_WB_Dup<1, r, opcode, 0b01,
3860                               !cast<RegisterOperand>(List # "8H_operand"),
3861                               uimm_h, asmop>;
3862
3863   defm _4S : NeonI_LDN_WB_Dup<1, r, opcode, 0b10,
3864                               !cast<RegisterOperand>(List # "4S_operand"),
3865                               uimm_s, asmop>;
3866
3867   defm _2D : NeonI_LDN_WB_Dup<1, r, opcode, 0b11,
3868                               !cast<RegisterOperand>(List # "2D_operand"),
3869                               uimm_d, asmop>;
3870 }
3871
3872 // Post-index load single 1-element structure to all lanes of 1 register
3873 defm LD1R_WB : LDWB_Dup_BHSD<0b0, 0b110, "VOne", "ld1r", uimm_exact1,
3874                              uimm_exact2, uimm_exact4, uimm_exact8>;
3875
3876 // Post-index load single N-element structure to all lanes of N consecutive 
3877 // registers (N = 2,3,4)
3878 defm LD2R_WB : LDWB_Dup_BHSD<0b1, 0b110, "VPair", "ld2r", uimm_exact2,
3879                              uimm_exact4, uimm_exact8, uimm_exact16>;
3880 defm LD3R_WB : LDWB_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r", uimm_exact3,
3881                              uimm_exact6, uimm_exact12, uimm_exact24>;
3882 defm LD4R_WB : LDWB_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r", uimm_exact4,
3883                              uimm_exact8, uimm_exact16, uimm_exact32>;
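// Illustrative assembler forms for the post-index load-and-replicate
// definitions above (examples only; the immediate equals the number of bytes
// loaded):
//   ld1r { v0.8b }, [x0], #1
//   ld4r { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #32
//   ld2r { v0.16b, v1.16b }, [x0], x2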
3884
3885 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1, 
3886     Constraints = "$Rn = $wb, $Rt = $src",
3887     DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
3888   class LDN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
3889                                 Operand ImmTy, Operand ImmOp, string asmop>
3890       : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
3891                                 (outs VList:$Rt, GPR64xsp:$wb),
3892                                 (ins GPR64xsp:$Rn, ImmTy:$amt,
3893                                     VList:$src, ImmOp:$lane),
3894                                 asmop # "\t$Rt[$lane], [$Rn], $amt",
3895                                 [],
3896                                 NoItinerary> {
3897     let Rm = 0b11111;
3898   }
3899
3900   class LDN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
3901                                  Operand ImmTy, Operand ImmOp, string asmop>
3902       : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
3903                                 (outs VList:$Rt, GPR64xsp:$wb),
3904                                 (ins GPR64xsp:$Rn, GPR64noxzr:$Rm,
3905                                     VList:$src, ImmOp:$lane),
3906                                 asmop # "\t$Rt[$lane], [$Rn], $Rm",
3907                                 [],
3908                                 NoItinerary>;
3909 }
3910
3911 multiclass LD_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
3912                            Operand uimm_b, Operand uimm_h,
3913                            Operand uimm_s, Operand uimm_d> {
3914   def _B_fixed : LDN_WBFx_Lane<r, 0b00, op0,
3915                                !cast<RegisterOperand>(List # "B_operand"),
3916                                uimm_b, neon_uimm4_bare, asmop> {
3917     let Inst{12-10} = lane{2-0};
3918     let Inst{30} = lane{3};
3919   }
3920
3921   def _B_register : LDN_WBReg_Lane<r, 0b00, op0,
3922                                    !cast<RegisterOperand>(List # "B_operand"),
3923                                    uimm_b, neon_uimm4_bare, asmop> {
3924     let Inst{12-10} = lane{2-0};
3925     let Inst{30} = lane{3};
3926   }
3927   
3928   def _H_fixed : LDN_WBFx_Lane<r, 0b01, op0,
3929                                !cast<RegisterOperand>(List # "H_operand"),
3930                                uimm_h, neon_uimm3_bare, asmop> {
3931     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
3932     let Inst{30} = lane{2};
3933   }
3934   
3935   def _H_register : LDN_WBReg_Lane<r, 0b01, op0,
3936                                    !cast<RegisterOperand>(List # "H_operand"),
3937                                    uimm_h, neon_uimm3_bare, asmop> {
3938     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
3939     let Inst{30} = lane{2};
3940   }
3941
3942   def _S_fixed : LDN_WBFx_Lane<r, 0b10, op0,
3943                                !cast<RegisterOperand>(List # "S_operand"),
3944                                uimm_s, neon_uimm2_bare, asmop> {
3945     let Inst{12-10} = {lane{0}, 0b0, 0b0};
3946     let Inst{30} = lane{1};
3947   }
3948
3949   def _S_register : LDN_WBReg_Lane<r, 0b10, op0,
3950                                    !cast<RegisterOperand>(List # "S_operand"),
3951                                    uimm_s, neon_uimm2_bare, asmop> {
3952     let Inst{12-10} = {lane{0}, 0b0, 0b0};
3953     let Inst{30} = lane{1};
3954   }
3955   
3956   def _D_fixed : LDN_WBFx_Lane<r, 0b10, op0,
3957                                !cast<RegisterOperand>(List # "D_operand"),
3958                                uimm_d, neon_uimm1_bare, asmop> {
3959     let Inst{12-10} = 0b001;
3960     let Inst{30} = lane{0};
3961   }
3962
3963   def _D_register : LDN_WBReg_Lane<r, 0b10, op0,
3964                                    !cast<RegisterOperand>(List # "D_operand"),
3965                                    uimm_d, neon_uimm1_bare, asmop> {
3966     let Inst{12-10} = 0b001;
3967     let Inst{30} = lane{0};
3968   }
3969 }
3970
3971 // Post-index load single 1-element structure to one lane of 1 register.
3972 defm LD1LN_WB : LD_Lane_WB_BHSD<0b0, 0b0, "VOne", "ld1", uimm_exact1,
3973                                 uimm_exact2, uimm_exact4, uimm_exact8>;
3974
3975 // Post-index load single N-element structure to one lane of N consecutive
3976 // registers (N = 2,3,4)
3977
3978 defm LD2LN_WB : LD_Lane_WB_BHSD<0b1, 0b0, "VPair", "ld2", uimm_exact2,
3979                                 uimm_exact4, uimm_exact8, uimm_exact16>;
3980 defm LD3LN_WB : LD_Lane_WB_BHSD<0b0, 0b1, "VTriple", "ld3", uimm_exact3,
3981                                 uimm_exact6, uimm_exact12, uimm_exact24>;
3982 defm LD4LN_WB : LD_Lane_WB_BHSD<0b1, 0b1, "VQuad", "ld4", uimm_exact4,
3983                                 uimm_exact8, uimm_exact16, uimm_exact32>;
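// Illustrative assembler forms for the post-index single-lane load definitions
// above (examples only):
//   ld1 { v0.b }[7], [x0], #1
//   ld2 { v0.h, v1.h }[2], [x0], #4
//   ld4 { v0.s, v1.s, v2.s, v3.s }[1], [x0], x2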
3984
3985 let mayStore = 1, neverHasSideEffects = 1,
3986     hasExtraDefRegAllocReq = 1, Constraints = "$Rn = $wb",
3987     DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
3988   class STN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
3989                       Operand ImmTy, Operand ImmOp, string asmop>
3990       : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
3991                                 (outs GPR64xsp:$wb),
3992                                 (ins GPR64xsp:$Rn, ImmTy:$amt,
3993                                     VList:$Rt, ImmOp:$lane),
3994                                 asmop # "\t$Rt[$lane], [$Rn], $amt",
3995                                 [],
3996                                 NoItinerary> {
3997     let Rm = 0b11111;
3998   }
3999
4000   class STN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
4001                        Operand ImmTy, Operand ImmOp, string asmop>
4002       : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
4003                                 (outs GPR64xsp:$wb),
4004                                 (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VList:$Rt,
4005                                     ImmOp:$lane),
4006                                 asmop # "\t$Rt[$lane], [$Rn], $Rm",
4007                                 [],
4008                                 NoItinerary>;
4009 }
4010
4011 multiclass ST_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
4012                            Operand uimm_b, Operand uimm_h,
4013                            Operand uimm_s, Operand uimm_d> {
4014   def _B_fixed : STN_WBFx_Lane<r, 0b00, op0,
4015                                !cast<RegisterOperand>(List # "B_operand"),
4016                                uimm_b, neon_uimm4_bare, asmop> {
4017     let Inst{12-10} = lane{2-0};
4018     let Inst{30} = lane{3};
4019   }
4020
4021   def _B_register : STN_WBReg_Lane<r, 0b00, op0,
4022                                    !cast<RegisterOperand>(List # "B_operand"),
4023                                    uimm_b, neon_uimm4_bare, asmop> {
4024     let Inst{12-10} = lane{2-0};
4025     let Inst{30} = lane{3};
4026   }
4027   
4028   def _H_fixed : STN_WBFx_Lane<r, 0b01, op0,
4029                                !cast<RegisterOperand>(List # "H_operand"),
4030                                uimm_h, neon_uimm3_bare, asmop> {
4031     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4032     let Inst{30} = lane{2};
4033   }
4034   
4035   def _H_register : STN_WBReg_Lane<r, 0b01, op0,
4036                                    !cast<RegisterOperand>(List # "H_operand"),
4037                                    uimm_h, neon_uimm3_bare, asmop> {
4038     let Inst{12-10} = {lane{1}, lane{0}, 0b0};
4039     let Inst{30} = lane{2};
4040   }
4041
4042   def _S_fixed : STN_WBFx_Lane<r, 0b10, op0,
4043                                !cast<RegisterOperand>(List # "S_operand"),
4044                                uimm_s, neon_uimm2_bare, asmop> {
4045     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4046     let Inst{30} = lane{1};
4047   }
4048
4049   def _S_register : STN_WBReg_Lane<r, 0b10, op0,
4050                                    !cast<RegisterOperand>(List # "S_operand"),
4051                                    uimm_s, neon_uimm2_bare, asmop> {
4052     let Inst{12-10} = {lane{0}, 0b0, 0b0};
4053     let Inst{30} = lane{1};
4054   }
4055   
4056   def _D_fixed : STN_WBFx_Lane<r, 0b10, op0,
4057                                !cast<RegisterOperand>(List # "D_operand"),
4058                                uimm_d, neon_uimm1_bare, asmop> {
4059     let Inst{12-10} = 0b001;
4060     let Inst{30} = lane{0};
4061   }
4062
4063   def _D_register : STN_WBReg_Lane<r, 0b10, op0,
4064                                    !cast<RegisterOperand>(List # "D_operand"),
4065                                    uimm_d, neon_uimm1_bare, asmop> {
4066     let Inst{12-10} = 0b001;
4067     let Inst{30} = lane{0};
4068   }
4069 }
4070
4071 // Post-index store single 1-element structure from one lane of 1 register.
4072 defm ST1LN_WB : ST_Lane_WB_BHSD<0b0, 0b0, "VOne", "st1", uimm_exact1,
4073                                 uimm_exact2, uimm_exact4, uimm_exact8>;
4074
4075 // Post-index store single N-element structure from one lane of N consecutive
4076 // registers (N = 2,3,4)
4077 defm ST2LN_WB : ST_Lane_WB_BHSD<0b1, 0b0, "VPair", "st2", uimm_exact2,
4078                                 uimm_exact4, uimm_exact8, uimm_exact16>;
4079 defm ST3LN_WB : ST_Lane_WB_BHSD<0b0, 0b1, "VTriple", "st3", uimm_exact3,
4080                                 uimm_exact6, uimm_exact12, uimm_exact24>;
4081 defm ST4LN_WB : ST_Lane_WB_BHSD<0b1, 0b1, "VQuad", "st4", uimm_exact4,
4082                                 uimm_exact8, uimm_exact16, uimm_exact32>;
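// Illustrative assembler forms for the post-index single-lane store
// definitions above (examples only):
//   st1 { v0.d }[0], [x0], #8
//   st3 { v0.s, v1.s, v2.s }[1], [x0], #12
//   st2 { v0.b, v1.b }[4], [x0], x2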
4083
4084 // End of post-index load/store single N-element instructions
4085 // (class SIMD lsone-post)
4086
4087 // Neon Scalar instructions implementation
4088 // Scalar Three Same
4089
4090 class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
4091                              RegisterClass FPRC>
4092   : NeonI_Scalar3Same<u, size, opcode,
4093                       (outs FPRC:$Rd), (ins FPRC:$Rn, FPRC:$Rm),
4094                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4095                       [],
4096                       NoItinerary>;
4097
4098 class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
4099   : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
4100
4101 multiclass NeonI_Scalar3Same_HS_sizes<bit u, bits<5> opcode, string asmop,
4102                                       bit Commutable = 0> {
4103   let isCommutable = Commutable in {
4104     def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
4105     def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
4106   }
4107 }
4108
4109 multiclass NeonI_Scalar3Same_SD_sizes<bit u, bit size_high, bits<5> opcode,
4110                                       string asmop, bit Commutable = 0> {
4111   let isCommutable = Commutable in {
4112     def sss : NeonI_Scalar3Same_size<u, {size_high, 0b0}, opcode, asmop, FPR32>;
4113     def ddd : NeonI_Scalar3Same_size<u, {size_high, 0b1}, opcode, asmop, FPR64>;
4114   }
4115 }
4116
4117 multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
4118                                         string asmop, bit Commutable = 0> {
4119   let isCommutable = Commutable in {
4120     def bbb : NeonI_Scalar3Same_size<u, 0b00, opcode, asmop, FPR8>;
4121     def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
4122     def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
4123     def ddd : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
4124   }
4125 }
4126
4127 multiclass Neon_Scalar3Same_D_size_patterns<SDPatternOperator opnode,
4128                                             Instruction INSTD> {
4129   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
4130             (INSTD FPR64:$Rn, FPR64:$Rm)>;        
4131 }
4132
4133 multiclass Neon_Scalar3Same_BHSD_size_patterns<SDPatternOperator opnode,
4134                                                Instruction INSTB,
4135                                                Instruction INSTH,
4136                                                Instruction INSTS,
4137                                                Instruction INSTD>
4138   : Neon_Scalar3Same_D_size_patterns<opnode, INSTD> {
4139   def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
4140            (INSTB FPR8:$Rn, FPR8:$Rm)>;
4141
4142   def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4143            (INSTH FPR16:$Rn, FPR16:$Rm)>;
4144
4145   def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4146            (INSTS FPR32:$Rn, FPR32:$Rm)>;
4147 }
4148
4149 class Neon_Scalar3Same_cmp_D_size_patterns<SDPatternOperator opnode,
4150                                            Instruction INSTD>
4151   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
4152         (INSTD FPR64:$Rn, FPR64:$Rm)>;
4153
4154 multiclass Neon_Scalar3Same_HS_size_patterns<SDPatternOperator opnode,
4155                                              Instruction INSTH,
4156                                              Instruction INSTS> {
4157   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4158             (INSTH FPR16:$Rn, FPR16:$Rm)>;
4159   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4160             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4161 }
4162
4163 multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
4164                                              Instruction INSTS,
4165                                              Instruction INSTD> {
4166   def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
4167             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4168   def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4169             (INSTD FPR64:$Rn, FPR64:$Rm)>;
4170 }
4171
4172 multiclass Neon_Scalar3Same_cmp_SD_size_patterns<SDPatternOperator opnode,
4173                                                  Instruction INSTS,
4174                                                  Instruction INSTD> {
4175   def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
4176             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4177   def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4178             (INSTD FPR64:$Rn, FPR64:$Rm)>;
4179 }
4180
4181 // Scalar Three Different
4182
4183 class NeonI_Scalar3Diff_size<bit u, bits<2> size, bits<4> opcode, string asmop,
4184                              RegisterClass FPRCD, RegisterClass FPRCS>
4185   : NeonI_Scalar3Diff<u, size, opcode,
4186                       (outs FPRCD:$Rd), (ins FPRCS:$Rn, FPRCS:$Rm),
4187                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4188                       [],
4189                       NoItinerary>;
4190
4191 multiclass NeonI_Scalar3Diff_HS_size<bit u, bits<4> opcode, string asmop> {
4192   def shh : NeonI_Scalar3Diff_size<u, 0b01, opcode, asmop, FPR32, FPR16>;
4193   def dss : NeonI_Scalar3Diff_size<u, 0b10, opcode, asmop, FPR64, FPR32>;
4194 }
4195
4196 multiclass NeonI_Scalar3Diff_ml_HS_size<bit u, bits<4> opcode, string asmop> {
4197   let Constraints = "$Src = $Rd" in {
4198     def shh : NeonI_Scalar3Diff<u, 0b01, opcode,
4199                        (outs FPR32:$Rd), (ins FPR32:$Src, FPR16:$Rn, FPR16:$Rm),
4200                        !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4201                        [],
4202                        NoItinerary>;
4203     def dss : NeonI_Scalar3Diff<u, 0b10, opcode,
4204                        (outs FPR64:$Rd), (ins FPR64:$Src, FPR32:$Rn, FPR32:$Rm),
4205                        !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
4206                        [],
4207                        NoItinerary>;
4208   }
4209 }
4210
4211 multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
4212                                              Instruction INSTH,
4213                                              Instruction INSTS> {
4214   def : Pat<(v1i32 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4215             (INSTH FPR16:$Rn, FPR16:$Rm)>;
4216   def : Pat<(v1i64 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4217             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4218 }
4219
4220 multiclass Neon_Scalar3Diff_ml_HS_size_patterns<SDPatternOperator opnode,
4221                                              Instruction INSTH,
4222                                              Instruction INSTS> {
4223   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
4224             (INSTH FPR32:$Src, FPR16:$Rn, FPR16:$Rm)>;
4225   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
4226             (INSTS FPR64:$Src, FPR32:$Rn, FPR32:$Rm)>;
4227 }
4228
4229 // Scalar Two Registers Miscellaneous
4230
4231 class NeonI_Scalar2SameMisc_size<bit u, bits<2> size, bits<5> opcode, string asmop,
4232                              RegisterClass FPRCD, RegisterClass FPRCS>
4233   : NeonI_Scalar2SameMisc<u, size, opcode,
4234                           (outs FPRCD:$Rd), (ins FPRCS:$Rn),
4235                           !strconcat(asmop, "\t$Rd, $Rn"),
4236                           [],
4237                           NoItinerary>;
4238
4239 multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
4240                                          string asmop> {
4241   def ss : NeonI_Scalar2SameMisc_size<u, {size_high, 0b0}, opcode, asmop, FPR32,
4242                                       FPR32>;
4243   def dd : NeonI_Scalar2SameMisc_size<u, {size_high, 0b1}, opcode, asmop, FPR64,
4244                                       FPR64>;
4245 }
4246
4247 multiclass NeonI_Scalar2SameMisc_D_size<bit u, bits<5> opcode, string asmop> {
4248   def dd : NeonI_Scalar2SameMisc_size<u, 0b11, opcode, asmop, FPR64, FPR64>;
4249 }
4250
4251 multiclass NeonI_Scalar2SameMisc_BHSD_size<bit u, bits<5> opcode, string asmop>
4252   : NeonI_Scalar2SameMisc_D_size<u, opcode, asmop> {
4253   def bb : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR8>;
4254   def hh : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR16>;
4255   def ss : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR32>;
4256 }
4257
4258 class NeonI_Scalar2SameMisc_fcvtxn_D_size<bit u, bits<5> opcode, string asmop>
4259   : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR32, FPR64>;
4260
4261 multiclass NeonI_Scalar2SameMisc_narrow_HSD_size<bit u, bits<5> opcode,
4262                                                  string asmop> {
4263   def bh : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR16>;
4264   def hs : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR32>;
4265   def sd : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR64>;
4266 }
4267
4268 class NeonI_Scalar2SameMisc_accum_size<bit u, bits<2> size, bits<5> opcode,
4269                                        string asmop, RegisterClass FPRC>
4270   : NeonI_Scalar2SameMisc<u, size, opcode,
4271                           (outs FPRC:$Rd), (ins FPRC:$Src, FPRC:$Rn),
4272                           !strconcat(asmop, "\t$Rd, $Rn"),
4273                           [],
4274                           NoItinerary>;
4275
4276 multiclass NeonI_Scalar2SameMisc_accum_BHSD_size<bit u, bits<5> opcode,
4277                                                  string asmop> {
4278
4279   let Constraints = "$Src = $Rd" in {
4280     def bb : NeonI_Scalar2SameMisc_accum_size<u, 0b00, opcode, asmop, FPR8>;
4281     def hh : NeonI_Scalar2SameMisc_accum_size<u, 0b01, opcode, asmop, FPR16>;
4282     def ss : NeonI_Scalar2SameMisc_accum_size<u, 0b10, opcode, asmop, FPR32>;
4283     def dd : NeonI_Scalar2SameMisc_accum_size<u, 0b11, opcode, asmop, FPR64>;
4284   }
4285 }
4286
4287 class Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<SDPatternOperator opnode,
4288                                                   Instruction INSTD>
4289   : Pat<(v1f32 (opnode (v1f64 FPR64:$Rn))),
4290         (INSTD FPR64:$Rn)>;
4291
4292 multiclass Neon_Scalar2SameMisc_fcvt_SD_size_patterns<SDPatternOperator opnode,
4293                                                       Instruction INSTS,
4294                                                       Instruction INSTD> {
4295   def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn))),
4296             (INSTS FPR32:$Rn)>;
4297   def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))),
4298             (INSTD FPR64:$Rn)>;
4299 }
4300
4301 multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator Sopnode,
4302                                                      SDPatternOperator Dopnode,
4303                                                      Instruction INSTS,
4304                                                      Instruction INSTD> {
4305   def : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn))),
4306             (INSTS FPR32:$Rn)>;
4307   def : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn))),
4308             (INSTD FPR64:$Rn)>;
4309 }
4310
4311 multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
4312                                                  Instruction INSTS,
4313                                                  Instruction INSTD> {
4314   def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn))),
4315             (INSTS FPR32:$Rn)>;
4316   def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
4317             (INSTD FPR64:$Rn)>;
4318 }
4319
4320 class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
4321   : NeonI_Scalar2SameMisc<u, 0b11, opcode,
4322                           (outs FPR64:$Rd), (ins FPR64:$Rn, neon_uimm0:$Imm),
4323                           !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4324                           [],
4325                           NoItinerary>;
4326
4327 multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
4328                                               string asmop> {
4329   def ssi : NeonI_Scalar2SameMisc<u, 0b10, opcode,
4330                            (outs FPR32:$Rd), (ins FPR32:$Rn, fpz32:$FPImm),
4331                            !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
4332                            [],
4333                            NoItinerary>;
4334   def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
4335                            (outs FPR64:$Rd), (ins FPR64:$Rn, fpz64movi:$FPImm),
4336                            !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
4337                            [],
4338                            NoItinerary>;
4339 }
4340
4341 class Neon_Scalar2SameMisc_cmpz_D_size_patterns<SDPatternOperator opnode,
4342                                                 Instruction INSTD>
4343   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
4344                        (v1i64 (bitconvert (v8i8 Neon_AllZero))))),
4345         (INSTD FPR64:$Rn, 0)>;
4346
4347 multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
4348                                                       Instruction INSTS,
4349                                                       Instruction INSTD> {
4350   def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn),
4351                            (v1f32 (scalar_to_vector (f32 fpimm:$FPImm))))),
4352             (INSTS FPR32:$Rn, fpimm:$FPImm)>;
4353   def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn),
4354                            (v1f64 (bitconvert (v8i8 Neon_AllZero))))),
4355             (INSTD FPR64:$Rn, 0)>;
4356 }
4357
4358 multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
4359                                                 Instruction INSTD> {
4360   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn))),
4361             (INSTD FPR64:$Rn)>;
4362 }
4363
4364 multiclass Neon_Scalar2SameMisc_BHSD_size_patterns<SDPatternOperator opnode,
4365                                                    Instruction INSTB,
4366                                                    Instruction INSTH,
4367                                                    Instruction INSTS,
4368                                                    Instruction INSTD>
4369   : Neon_Scalar2SameMisc_D_size_patterns<opnode, INSTD> {
4370   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn))),
4371             (INSTB FPR8:$Rn)>;
4372   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn))),
4373             (INSTH FPR16:$Rn)>;
4374   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn))),
4375             (INSTS FPR32:$Rn)>;
4376 }
4377
4378 multiclass Neon_Scalar2SameMisc_narrow_HSD_size_patterns<
4379                                                        SDPatternOperator opnode,
4380                                                        Instruction INSTH,
4381                                                        Instruction INSTS,
4382                                                        Instruction INSTD> {
4383   def : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn))),
4384             (INSTH FPR16:$Rn)>;
4385   def : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn))),
4386             (INSTS FPR32:$Rn)>;
4387   def : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn))),
4388             (INSTD FPR64:$Rn)>;
4389
4390 }
4391
4392 multiclass Neon_Scalar2SameMisc_accum_BHSD_size_patterns<
4393                                                        SDPatternOperator opnode,
4394                                                        Instruction INSTB,
4395                                                        Instruction INSTH,
4396                                                        Instruction INSTS,
4397                                                        Instruction INSTD> {
4398   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Src), (v1i8 FPR8:$Rn))),
4399             (INSTB FPR8:$Src, FPR8:$Rn)>;
4400   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Src), (v1i16 FPR16:$Rn))),
4401             (INSTH FPR16:$Src, FPR16:$Rn)>;
4402   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i32 FPR32:$Rn))),
4403             (INSTS FPR32:$Src, FPR32:$Rn)>;
4404   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn))),
4405             (INSTD FPR64:$Src, FPR64:$Rn)>;
4406 }
4407
4408 // Scalar Shift By Immediate
4409
4410 class NeonI_ScalarShiftImm_size<bit u, bits<5> opcode, string asmop,
4411                                 RegisterClass FPRC, Operand ImmTy>
4412   : NeonI_ScalarShiftImm<u, opcode,
4413                          (outs FPRC:$Rd), (ins FPRC:$Rn, ImmTy:$Imm),
4414                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4415                          [], NoItinerary>;
4416
4417 multiclass NeonI_ScalarShiftRightImm_D_size<bit u, bits<5> opcode,
4418                                             string asmop> {
4419   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
4420     bits<6> Imm;
4421     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4422     let Inst{21-16} = Imm;
4423   }
4424 }
4425
4426 multiclass NeonI_ScalarShiftRightImm_BHSD_size<bit u, bits<5> opcode,
4427                                                string asmop>
4428   : NeonI_ScalarShiftRightImm_D_size<u, opcode, asmop> {
4429   def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shr_imm8> {
4430     bits<3> Imm;
4431     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4432     let Inst{18-16} = Imm;
4433   }
4434   def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shr_imm16> {
4435     bits<4> Imm;
4436     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4437     let Inst{19-16} = Imm;
4438   }
4439   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
4440     bits<5> Imm;
4441     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4442     let Inst{20-16} = Imm;
4443   }
4444 }
4445
4446 multiclass NeonI_ScalarShiftLeftImm_D_size<bit u, bits<5> opcode,
4447                                             string asmop> {
4448   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shl_imm64> {
4449     bits<6> Imm;
4450     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4451     let Inst{21-16} = Imm;
4452   }
4453 }
4454
4455 multiclass NeonI_ScalarShiftLeftImm_BHSD_size<bit u, bits<5> opcode,
4456                                               string asmop>
4457   : NeonI_ScalarShiftLeftImm_D_size<u, opcode, asmop> {
4458   def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shl_imm8> {
4459     bits<3> Imm;
4460     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4461     let Inst{18-16} = Imm;
4462   }
4463   def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shl_imm16> {
4464     bits<4> Imm;
4465     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4466     let Inst{19-16} = Imm;
4467   }
4468   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shl_imm32> {
4469     bits<5> Imm;
4470     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4471     let Inst{20-16} = Imm;
4472   }
4473 }
4474
4475 class NeonI_ScalarShiftRightImm_accum_D_size<bit u, bits<5> opcode, string asmop>
4476   : NeonI_ScalarShiftImm<u, opcode,
4477                          (outs FPR64:$Rd), (ins FPR64:$Src, FPR64:$Rn, shr_imm64:$Imm),
4478                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4479                          [], NoItinerary> {
4480     bits<6> Imm;
4481     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4482     let Inst{21-16} = Imm;
4483     let Constraints = "$Src = $Rd";
4484 }
4485
4486 class NeonI_ScalarShiftLeftImm_accum_D_size<bit u, bits<5> opcode, string asmop>
4487   : NeonI_ScalarShiftImm<u, opcode,
4488                          (outs FPR64:$Rd), (ins FPR64:$Src, FPR64:$Rn, shl_imm64:$Imm),
4489                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4490                          [], NoItinerary> {
4491     bits<6> Imm;
4492     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4493     let Inst{21-16} = Imm;
4494     let Constraints = "$Src = $Rd";
4495 }
4496
4497 class NeonI_ScalarShiftImm_narrow_size<bit u, bits<5> opcode, string asmop,
4498                                        RegisterClass FPRCD, RegisterClass FPRCS,
4499                                        Operand ImmTy>
4500   : NeonI_ScalarShiftImm<u, opcode,
4501                          (outs FPRCD:$Rd), (ins FPRCS:$Rn, ImmTy:$Imm),
4502                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4503                          [], NoItinerary>;
4504
4505 multiclass NeonI_ScalarShiftImm_narrow_HSD_size<bit u, bits<5> opcode,
4506                                                 string asmop> {
4507   def bhi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR8, FPR16,
4508                                              shr_imm8> {
4509     bits<3> Imm;
4510     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4511     let Inst{18-16} = Imm;
4512   }
4513   def hsi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR16, FPR32,
4514                                              shr_imm16> {
4515     bits<4> Imm;
4516     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4517     let Inst{19-16} = Imm;
4518   }
4519   def sdi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR32, FPR64,
4520                                              shr_imm32> {
4521     bits<5> Imm;
4522     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4523     let Inst{20-16} = Imm;
4524   }
4525 }
4526
4527 multiclass NeonI_ScalarShiftImm_cvt_SD_size<bit u, bits<5> opcode, string asmop> {
4528   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
4529     bits<5> Imm;
4530     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4531     let Inst{20-16} = Imm;
4532   }
4533   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
4534     bits<6> Imm;
4535     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4536     let Inst{21-16} = Imm;
4537   }
4538 }
4539
4540 multiclass Neon_ScalarShiftImm_D_size_patterns<SDPatternOperator opnode,
4541                                                Instruction INSTD> {
4542   def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
4543                 (INSTD FPR64:$Rn, imm:$Imm)>;
4544 }
4545
4546 class Neon_ScalarShiftImm_arm_D_size_patterns<SDPatternOperator opnode,
4547                                               Instruction INSTD>
4548   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 (Neon_vdup (i32 imm:$Imm))))),
4549         (INSTD FPR64:$Rn, imm:$Imm)>;
4550
4551 multiclass Neon_ScalarShiftImm_BHSD_size_patterns<SDPatternOperator opnode,
4552                                                   Instruction INSTB,
4553                                                   Instruction INSTH,
4554                                                   Instruction INSTS,
4555                                                   Instruction INSTD>
4556   : Neon_ScalarShiftImm_D_size_patterns<opnode, INSTD> {
4557   def bbi : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (i32 imm:$Imm))),
4558                 (INSTB FPR8:$Rn, imm:$Imm)>;
4559   def hhi : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (i32 imm:$Imm))),
4560                 (INSTH FPR16:$Rn, imm:$Imm)>;
4561   def ssi : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (i32 imm:$Imm))),
4562                 (INSTS FPR32:$Rn, imm:$Imm)>;
4563 }
4564
4565 class Neon_ScalarShiftImm_accum_D_size_patterns<SDPatternOperator opnode,
4566                                                 Instruction INSTD>
4567   : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
4568         (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
4569
4570 multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
4571                                                        SDPatternOperator opnode,
4572                                                        Instruction INSTH,
4573                                                        Instruction INSTS,
4574                                                        Instruction INSTD> {
4575   def bhi : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn), (i32 imm:$Imm))),
4576                 (INSTH FPR16:$Rn, imm:$Imm)>;
4577   def hsi : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn), (i32 imm:$Imm))),
4578                 (INSTS FPR32:$Rn, imm:$Imm)>;
4579   def sdi : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
4580                 (INSTD FPR64:$Rn, imm:$Imm)>;
4581 }
4582
4583 multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator Sopnode,
4584                                                       SDPatternOperator Dopnode,
4585                                                       Instruction INSTS,
4586                                                       Instruction INSTD> {
4587   def ssi : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn), (i32 imm:$Imm))),
4588                 (INSTS FPR32:$Rn, imm:$Imm)>;
4589   def ddi : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
4590                 (INSTD FPR64:$Rn, imm:$Imm)>;
4591 }
4592
4593 multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator Sopnode,
4594                                                       SDPatternOperator Dopnode,
4595                                                       Instruction INSTS,
4596                                                       Instruction INSTD> {
4597   def ssi : Pat<(v1i32 (Sopnode (v1f32 FPR32:$Rn), (i32 imm:$Imm))),
4598                 (INSTS FPR32:$Rn, imm:$Imm)>;
4599   def ddi : Pat<(v1i64 (Dopnode (v1f64 FPR64:$Rn), (i32 imm:$Imm))),
4600                 (INSTD FPR64:$Rn, imm:$Imm)>;
4601 }
4602
4603 // Scalar Signed Shift Right (Immediate)
4604 defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
4605 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
4606 // Pattern to match llvm.arm.* intrinsic.
4607 def : Neon_ScalarShiftImm_arm_D_size_patterns<sra, SSHRddi>;
4608
4609 // Scalar Unsigned Shift Right (Immediate)
4610 defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
4611 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vshrdu_n, USHRddi>;
4612 // Pattern to match llvm.arm.* intrinsic.
4613 def : Neon_ScalarShiftImm_arm_D_size_patterns<srl, USHRddi>;
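// Illustrative assembler forms (examples only): "sshr d0, d1, #63" and
// "ushr d0, d1, #7".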
4614
4615 // Scalar Signed Rounding Shift Right (Immediate)
4616 defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
4617 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vsrshr, SRSHRddi>;
4618
4619 // Scalar Unsigned Rounding Shift Right (Immediate)
4620 defm URSHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00100, "urshr">;
4621 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vurshr, URSHRddi>;
4622
4623 // Scalar Signed Shift Right and Accumulate (Immediate)
4624 def SSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00010, "ssra">;
4625 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsrads_n, SSRA>;
4626
4627 // Scalar Unsigned Shift Right and Accumulate (Immediate)
4628 def USRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00010, "usra">;
4629 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsradu_n, USRA>;
4630
4631 // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
4632 def SRSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00110, "srsra">;
4633 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vrsrads_n, SRSRA>;
4634
4635 // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
4636 def URSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00110, "ursra">;
4637 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vrsradu_n, URSRA>;
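// Illustrative assembler forms for the shift-right-and-accumulate definitions
// above (examples only): "ssra d0, d1, #16", "usra d0, d1, #2",
// "srsra d0, d1, #32", "ursra d0, d1, #1".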
4638
4639 // Scalar Shift Left (Immediate)
4640 defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
4641 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vshld_n, SHLddi>;
4642 // Pattern to match llvm.arm.* intrinsic.
4643 def : Neon_ScalarShiftImm_arm_D_size_patterns<shl, SHLddi>;
4644
4645 // Signed Saturating Shift Left (Immediate)
4646 defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
4647 defm : Neon_ScalarShiftImm_BHSD_size_patterns<int_aarch64_neon_vqshls_n,
4648                                               SQSHLbbi, SQSHLhhi,
4649                                               SQSHLssi, SQSHLddi>;
4650 // Pattern to match llvm.arm.* intrinsic.
4651 defm : Neon_ScalarShiftImm_D_size_patterns<Neon_sqrshlImm, SQSHLddi>;
4652
4653 // Unsigned Saturating Shift Left (Immediate)
4654 defm UQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01110, "uqshl">;
4655 defm : Neon_ScalarShiftImm_BHSD_size_patterns<int_aarch64_neon_vqshlu_n,
4656                                               UQSHLbbi, UQSHLhhi,
4657                                               UQSHLssi, UQSHLddi>;
4658 // Pattern to match llvm.arm.* intrinsic.
4659 defm : Neon_ScalarShiftImm_D_size_patterns<Neon_uqrshlImm, UQSHLddi>;
4660
4661 // Signed Saturating Shift Left Unsigned (Immediate)
4662 defm SQSHLU : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01100, "sqshlu">;
4663 defm : Neon_ScalarShiftImm_BHSD_size_patterns<int_aarch64_neon_vsqshlu,
4664                                               SQSHLUbbi, SQSHLUhhi,
4665                                               SQSHLUssi, SQSHLUddi>;
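// Illustrative assembler forms for the left-shift definitions above
// (examples only): "shl d0, d1, #1", "sqshl b0, b1, #3", "uqshl h0, h1, #11",
// "sqshlu s0, s1, #5".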
4666
4667 // Shift Right And Insert (Immediate)
4668 def SRI : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b01000, "sri">;
4669 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsri, SRI>;
4670
4671 // Shift Left And Insert (Immediate)
4672 def SLI : NeonI_ScalarShiftLeftImm_accum_D_size<0b1, 0b01010, "sli">;
4673 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsli, SLI>;
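// Illustrative assembler forms (examples only): "sri d0, d1, #16" and
// "sli d0, d1, #8".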
4674
4675 // Signed Saturating Shift Right Narrow (Immediate)
4676 defm SQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10010, "sqshrn">;
4677 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrn,
4678                                                     SQSHRNbhi, SQSHRNhsi,
4679                                                     SQSHRNsdi>;
4680
4681 // Unsigned Saturating Shift Right Narrow (Immediate)
4682 defm UQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10010, "uqshrn">;
4683 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqshrn,
4684                                                     UQSHRNbhi, UQSHRNhsi,
4685                                                     UQSHRNsdi>;
4686
4687 // Signed Saturating Rounded Shift Right Narrow (Immediate)
4688 defm SQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10011, "sqrshrn">;
4689 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrn,
4690                                                     SQRSHRNbhi, SQRSHRNhsi,
4691                                                     SQRSHRNsdi>;
4692
4693 // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
4694 defm UQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10011, "uqrshrn">;
4695 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqrshrn,
4696                                                     UQRSHRNbhi, UQRSHRNhsi,
4697                                                     UQRSHRNsdi>;
4698
4699 // Signed Saturating Shift Right Unsigned Narrow (Immediate)
4700 defm SQSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10000, "sqshrun">;
4701 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrun,
4702                                                     SQSHRUNbhi, SQSHRUNhsi,
4703                                                     SQSHRUNsdi>;
4704
4705 // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
4706 defm SQRSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10001, "sqrshrun">;
4707 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
4708                                                     SQRSHRUNbhi, SQRSHRUNhsi,
4709                                                     SQRSHRUNsdi>;
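// Illustrative assembler forms for the narrowing shift definitions above
// (examples only): "sqshrn s0, d1, #16", "uqrshrn b0, h1, #3",
// "sqrshrun h0, s1, #9".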
4710
4711 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
4712 defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
4713 defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_s32,
4714                                                   int_aarch64_neon_vcvtf64_n_s64,
4715                                                   SCVTF_Nssi, SCVTF_Nddi>;
4716
4717 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
4718 defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
4719 defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_u32,
4720                                                   int_aarch64_neon_vcvtf64_n_u64,
4721                                                   UCVTF_Nssi, UCVTF_Nddi>;
4722
4723 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
4724 defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
4725 defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_s32_f32,
4726                                                   int_aarch64_neon_vcvtd_n_s64_f64,
4727                                                   FCVTZS_Nssi, FCVTZS_Nddi>;
4728
4729 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
4730 defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
4731 defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_u32_f32,
4732                                                   int_aarch64_neon_vcvtd_n_u64_f64,
4733                                                   FCVTZU_Nssi, FCVTZU_Nddi>;
4734
4735 // Scalar Integer Add
4736 let isCommutable = 1 in {
4737 def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
4738 }
4739
4740 // Scalar Integer Sub
4741 def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
4742
4743 // Pattern for Scalar Integer Add and Sub with D register only
4744 defm : Neon_Scalar3Same_D_size_patterns<add, ADDddd>;
4745 defm : Neon_Scalar3Same_D_size_patterns<sub, SUBddd>;
4746
4747 // Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
4748 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
4749 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
4750 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
4751 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
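// Illustrative note (comment only, a sketch rather than part of the build):
// with the generic patterns above, plain IR such as
//   %r = add <1 x i64> %a, %b
// is selected to the scalar form ADDddd ("add d0, d1, d2"), and the
// llvm.aarch64.neon.vaddds / llvm.aarch64.neon.vadddu intrinsic calls reach
// the same instruction through the intrinsic patterns.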
4752
4753 // Scalar Integer Saturating Add (Signed, Unsigned)
4754 defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
4755 defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
4756
4757 // Scalar Integer Saturating Sub (Signed, Unsigned)
4758 defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
4759 defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
4760
4761
4762 // Patterns to match llvm.aarch64.* intrinsic for
4763 // Scalar Integer Saturating Add, Sub  (Signed, Unsigned)
4764 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqadds, SQADDbbb,
4765                                            SQADDhhh, SQADDsss, SQADDddd>;
4766 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqaddu, UQADDbbb,
4767                                            UQADDhhh, UQADDsss, UQADDddd>;
4768 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubs, SQSUBbbb,
4769                                            SQSUBhhh, SQSUBsss, SQSUBddd>;
4770 defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubu, UQSUBbbb,
4771                                            UQSUBhhh, UQSUBsss, UQSUBddd>;
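// Illustrative expansion (comment only, assuming the usual shape of the
// multiclass): the SQADD instantiation above yields one pattern per element
// size, e.g. roughly this for the d-sized variant:
//   def : Pat<(v1i64 (int_arm_neon_vqadds (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
//             (SQADDddd FPR64:$Rn, FPR64:$Rm)>;
// with analogous b/h/s patterns selecting SQADDbbb/SQADDhhh/SQADDsss.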
4772
4773 // Scalar Integer Saturating Doubling Multiply Half High
4774 defm SQDMULH : NeonI_Scalar3Same_HS_sizes<0b0, 0b10110, "sqdmulh", 1>;
4775
4776 // Scalar Integer Saturating Rounding Doubling Multiply Half High
4777 defm SQRDMULH : NeonI_Scalar3Same_HS_sizes<0b1, 0b10110, "sqrdmulh", 1>;
4778
4779 // Patterns to match llvm.arm.* intrinsic for
4780 // Scalar Integer Saturating Doubling Multiply Half High and
4781 // Scalar Integer Saturating Rounding Doubling Multiply Half High
4782 defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqdmulh, SQDMULHhhh,
4783                                                                SQDMULHsss>;
4784 defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqrdmulh, SQRDMULHhhh,
4785                                                                 SQRDMULHsss>;
4786
4787 // Scalar Floating-point Multiply Extended
4788 defm FMULX : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11011, "fmulx", 1>;
4789
4790 // Scalar Floating-point Reciprocal Step
4791 defm FRECPS : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11111, "frecps", 0>;
4792
4793 // Scalar Floating-point Reciprocal Square Root Step
4794 defm FRSQRTS : NeonI_Scalar3Same_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", 0>;
4795
4796 // Patterns to match llvm.arm.* intrinsic for
4797 // Scalar Floating-point Reciprocal Step and
4798 // Scalar Floating-point Reciprocal Square Root Step
4799 defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrecps, FRECPSsss,
4800                                                               FRECPSddd>;
4801 defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrsqrts, FRSQRTSsss,
4802                                                                FRSQRTSddd>;
4803
4804 // Patterns to match llvm.aarch64.* intrinsic for
4805 // Scalar Floating-point Multiply Extended
4806 multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
4807                                                   Instruction INSTS,
4808                                                   Instruction INSTD> {
4809   def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
4810             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4811   def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
4812             (INSTD FPR64:$Rn, FPR64:$Rm)>;
4813 }
4814
4815 defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
4816                                               FMULXsss,FMULXddd>;
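// For reference (illustrative comment): with the multiclass above, calls to
// llvm.aarch64.neon.vmulx on scalar f32/f64 values are selected to
// FMULXsss / FMULXddd, i.e. "fmulx s0, s1, s2" and "fmulx d0, d1, d2".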
4817
4818 // Scalar Integer Shift Left (Signed, Unsigned)
4819 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
4820 def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
4821
4822 // Patterns to match llvm.arm.* intrinsic for
4823 // Scalar Integer Shift Left (Signed, Unsigned)
4824 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
4825 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
4826
4827 // Patterns to match llvm.aarch64.* intrinsic for
4828 // Scalar Integer Shift Left (Signed, Unsigned)
4829 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
4830 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
4831
4832 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
4833 defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
4834 defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
4835
4836 // Patterns to match llvm.aarch64.* intrinsic for
4837 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
4838 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb,
4839                                            SQSHLhhh, SQSHLsss, SQSHLddd>;
4840 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb,
4841                                            UQSHLhhh, UQSHLsss, UQSHLddd>;
4842
4843 // Patterns to match llvm.arm.* intrinsic for
4844 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
4845 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
4846 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
4847
4848 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
4849 def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
4850 def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
4851
4852 // Patterns to match llvm.aarch64.* intrinsic for
4853 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
4854 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
4855 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
4856
4857 // Patterns to match llvm.arm.* intrinsic for
4858 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
4859 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
4860 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
4861
4862 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
4863 defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
4864 defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
4865
4866 // Patterns to match llvm.aarch64.* intrinsic for
4867 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
4868 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb,
4869                                            SQRSHLhhh, SQRSHLsss, SQRSHLddd>;
4870 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb,
4871                                            UQRSHLhhh, UQRSHLsss, UQRSHLddd>;
4872
4873 // Patterns to match llvm.arm.* intrinsic for
4874 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
4875 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
4876 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
4877
4878 // Signed Saturating Doubling Multiply-Add Long
4879 defm SQDMLAL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1001, "sqdmlal">;
4880 defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlal,
4881                                             SQDMLALshh, SQDMLALdss>;
4882
4883 // Signed Saturating Doubling Multiply-Subtract Long
4884 defm SQDMLSL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1011, "sqdmlsl">;
4885 defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlsl,
4886                                             SQDMLSLshh, SQDMLSLdss>;
4887
4888 // Signed Saturating Doubling Multiply Long
4889 defm SQDMULL : NeonI_Scalar3Diff_HS_size<0b0, 0b1101, "sqdmull">;
4890 defm : Neon_Scalar3Diff_HS_size_patterns<int_arm_neon_vqdmull,
4891                                          SQDMULLshh, SQDMULLdss>;
4892
4893 // Scalar Signed Integer Convert To Floating-point
4894 defm SCVTF  : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11101, "scvtf">;
4895 defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_s32,
4896                                                  int_aarch64_neon_vcvtf64_s64,
4897                                                  SCVTFss, SCVTFdd>;
4898
4899 // Scalar Unsigned Integer Convert To Floating-point
4900 defm UCVTF  : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11101, "ucvtf">;
4901 defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_u32,
4902                                                  int_aarch64_neon_vcvtf64_u64,
4903                                                  UCVTFss, UCVTFdd>;
4904
4905 // Scalar Floating-point Converts
4906 def FCVTXN : NeonI_Scalar2SameMisc_fcvtxn_D_size<0b1, 0b10110, "fcvtxn">;
4907 def : Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<int_aarch64_neon_fcvtxn,
4908                                                   FCVTXN>;
4909
4910 defm FCVTNS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11010, "fcvtns">;
4911 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtns,
4912                                                   FCVTNSss, FCVTNSdd>;
4913
4914 defm FCVTNU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11010, "fcvtnu">;
4915 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtnu,
4916                                                   FCVTNUss, FCVTNUdd>;
4917
4918 defm FCVTMS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11011, "fcvtms">;
4919 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtms,
4920                                                   FCVTMSss, FCVTMSdd>;
4921
4922 defm FCVTMU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11011, "fcvtmu">;
4923 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtmu,
4924                                                   FCVTMUss, FCVTMUdd>;
4925
4926 defm FCVTAS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11100, "fcvtas">;
4927 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtas,
4928                                                   FCVTASss, FCVTASdd>;
4929
4930 defm FCVTAU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11100, "fcvtau">;
4931 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtau,
4932                                                   FCVTAUss, FCVTAUdd>;
4933
4934 defm FCVTPS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11010, "fcvtps">;
4935 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtps,
4936                                                   FCVTPSss, FCVTPSdd>;
4937
4938 defm FCVTPU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11010, "fcvtpu">;
4939 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtpu,
4940                                                   FCVTPUss, FCVTPUdd>;
4941
4942 defm FCVTZS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11011, "fcvtzs">;
4943 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzs,
4944                                                   FCVTZSss, FCVTZSdd>;
4945
4946 defm FCVTZU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11011, "fcvtzu">;
4947 defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzu,
4948                                                   FCVTZUss, FCVTZUdd>;
4949
4950 // Scalar Floating-point Reciprocal Estimate
4951 defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
4952 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrecpe,
4953                                              FRECPEss, FRECPEdd>;
4954
4955 // Scalar Floating-point Reciprocal Exponent
4956 defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
4957 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpx,
4958                                              FRECPXss, FRECPXdd>;
4959
4960 // Scalar Floating-point Reciprocal Square Root Estimate
4961 defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
4962 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrsqrte,
4963                                              FRSQRTEss, FRSQRTEdd>;
4964
4965 // Scalar Integer Compare
4966
4967 // Scalar Compare Bitwise Equal
4968 def CMEQddd: NeonI_Scalar3Same_D_size<0b1, 0b10001, "cmeq">;
4969 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vceq, CMEQddd>;
4970
4971 class Neon_Scalar3Same_cmp_D_size_v1_patterns<SDPatternOperator opnode,
4972                                               Instruction INSTD,
4973                                               CondCode CC>
4974   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm), CC)),
4975         (INSTD FPR64:$Rn, FPR64:$Rm)>;
4976
4977 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMEQddd, SETEQ>;
4978
4979 // Scalar Compare Signed Greater Than Or Equal
4980 def CMGEddd: NeonI_Scalar3Same_D_size<0b0, 0b00111, "cmge">;
4981 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcge, CMGEddd>;
4982
4983 // Scalar Compare Unsigned Higher Or Same
4984 def CMHSddd: NeonI_Scalar3Same_D_size<0b1, 0b00111, "cmhs">;
4985 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vchs, CMHSddd>;
4986
4987 // Scalar Compare Unsigned Higher
4988 def CMHIddd: NeonI_Scalar3Same_D_size<0b1, 0b00110, "cmhi">;
4989 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vchi, CMHIddd>;
4990
4991 // Scalar Compare Signed Greater Than
4992 def CMGTddd: NeonI_Scalar3Same_D_size<0b0, 0b00110, "cmgt">;
4993 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcgt, CMGTddd>;
4994
4995 // Scalar Compare Bitwise Test Bits
4996 def CMTSTddd: NeonI_Scalar3Same_D_size<0b0, 0b10001, "cmtst">;
4997 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vtstd, CMTSTddd>;
4998 def : Neon_Scalar3Same_cmp_D_size_patterns<Neon_tst, CMTSTddd>;
4999
5000 // Scalar Compare Bitwise Equal To Zero
5001 def CMEQddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01001, "cmeq">;
5002 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vceq,
5003                                                 CMEQddi>;
5004
5005 // Scalar Compare Signed Greater Than Or Equal To Zero
5006 def CMGEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01000, "cmge">;
5007 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcge,
5008                                                 CMGEddi>;
5009
5010 // Scalar Compare Signed Greater Than Zero
5011 def CMGTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01000, "cmgt">;
5012 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcgt,
5013                                                 CMGTddi>;
5014
5015 // Scalar Compare Signed Less Than Or Equal To Zero
5016 def CMLEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01001, "cmle">;
5017 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vclez,
5018                                                 CMLEddi>;
5019
5020 // Scalar Compare Signed Less Than Zero
5021 def CMLTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01010, "cmlt">;
5022 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcltz,
5023                                                 CMLTddi>;
5024
5025 // Scalar Floating-point Compare
5026
5027 // Scalar Floating-point Compare Mask Equal
5028 defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">;
5029 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vceq,
5030                                              FCMEQsss, FCMEQddd>;
5031
5032 // Scalar Floating-point Compare Mask Equal To Zero
5033 defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
5034 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vceq,
5035                                                   FCMEQZssi, FCMEQZddi>;
5036
5037 // Scalar Floating-point Compare Mask Greater Than Or Equal
5038 defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
5039 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcge,
5040                                              FCMGEsss, FCMGEddd>;
5041
5042 // Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
5043 defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">;
5044 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcge,
5045                                                   FCMGEZssi, FCMGEZddi>;
5046
5047 // Scalar Floating-point Compare Mask Greater Than
5048 defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">;
5049 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcgt,
5050                                              FCMGTsss, FCMGTddd>;
5051
5052 // Scalar Floating-point Compare Mask Greater Than Zero
5053 defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">;
5054 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcgt,
5055                                                   FCMGTZssi, FCMGTZddi>;
5056
5057 // Scalar Floating-point Compare Mask Less Than Or Equal To Zero
5058 defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">;
5059 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vclez,
5060                                                   FCMLEZssi, FCMLEZddi>;
5061
5062 // Scalar Floating-point Compare Mask Less Than Zero
5063 defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">;
5064 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcltz,
5065                                                   FCMLTZssi, FCMLTZddi>;
5066
5067 // Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
5068 defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">;
5069 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcage,
5070                                              FACGEsss, FACGEddd>;
5071
5072 // Scalar Floating-point Absolute Compare Mask Greater Than
5073 defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">;
5074 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcagt,
5075                                              FACGTsss, FACGTddd>;
5076
5077 // Scalar Floating-point Absolute Difference
5078 defm FABD: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11010, "fabd">;
5079 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vabd,
5080                                          FABDsss, FABDddd>;
5081
5082 // Scalar Absolute Value
5083 defm ABS : NeonI_Scalar2SameMisc_D_size<0b0, 0b01011, "abs">;
5084 defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vabs, ABSdd>;
5085
5086 // Scalar Signed Saturating Absolute Value
5087 defm SQABS : NeonI_Scalar2SameMisc_BHSD_size<0b0, 0b00111, "sqabs">;
5088 defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqabs,
5089                                                SQABSbb, SQABShh, SQABSss, SQABSdd>;
5090
5091 // Scalar Negate
5092 defm NEG : NeonI_Scalar2SameMisc_D_size<0b1, 0b01011, "neg">;
5093 defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vneg, NEGdd>;
5094
5095 // Scalar Signed Saturating Negate
5096 defm SQNEG : NeonI_Scalar2SameMisc_BHSD_size<0b1, 0b00111, "sqneg">;
5097 defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqneg,
5098                                                SQNEGbb, SQNEGhh, SQNEGss, SQNEGdd>;
5099
5100 // Scalar Signed Saturating Accumulate of Unsigned Value
5101 defm SUQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b0, 0b00011, "suqadd">;
5102 defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vuqadd,
5103                                                      SUQADDbb, SUQADDhh,
5104                                                      SUQADDss, SUQADDdd>;
5105
5106 // Scalar Unsigned Saturating Accumulate of Signed Value
5107 defm USQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b1, 0b00011, "usqadd">;
5108 defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vsqadd,
5109                                                      USQADDbb, USQADDhh,
5110                                                      USQADDss, USQADDdd>;
5111
5112 // Scalar Signed Saturating Extract Unsigned Narrow
5113 defm SQXTUN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10010, "sqxtun">;
5114 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnsu,
5115                                                      SQXTUNbh, SQXTUNhs,
5116                                                      SQXTUNsd>;
5117
5118 // Scalar Signed Saturating Extract Narrow
5119 defm SQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b0, 0b10100, "sqxtn">;
5120 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovns,
5121                                                      SQXTNbh, SQXTNhs,
5122                                                      SQXTNsd>;
5123
5124 // Scalar Unsigned Saturating Extract Narrow
5125 defm UQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10100, "uqxtn">;
5126 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnu,
5127                                                      UQXTNbh, UQXTNhs,
5128                                                      UQXTNsd>;
5129
5130 // Scalar Reduce Pairwise
5131
5132 multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
5133                                      string asmop, bit Commutable = 0> {
5134   let isCommutable = Commutable in {
5135     def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
5136                                 (outs FPR64:$Rd), (ins VPR128:$Rn),
5137                                 !strconcat(asmop, "\t$Rd, $Rn.2d"),
5138                                 [],
5139                                 NoItinerary>;
5140   }
5141 }
5142
5143 multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
5144                                      string asmop, bit Commutable = 0>
5145   : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
5146   let isCommutable = Commutable in {
5147     def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
5148                                 (outs FPR32:$Rd), (ins VPR64:$Rn),
5149                                 !strconcat(asmop, "\t$Rd, $Rn.2s"),
5150                                 [],
5151                                 NoItinerary>;
5152   }
5153 }
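// Illustrative note (comment only): instantiating the multiclass above, e.g.
// as FADDPvv below, produces FADDPvv_S_2S ("faddp s0, v1.2s") and
// FADDPvv_D_2D ("faddp d0, v1.2d"), each reducing a pair of vector elements
// into a single scalar result.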
5154
5155 // Scalar Reduce Addition Pairwise (Integer)
5157 defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
5158
5159 // Pattern to match llvm.aarch64.* intrinsic for
5160 // Scalar Reduce Addition Pairwise (Integer)
5161 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
5162           (ADDPvv_D_2D VPR128:$Rn)>;
5163
5164 // Scalar Reduce Addition Pairwise (Floating Point)
5165 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
5166
5167 // Scalar Reduce Maximum Pairwise (Floating Point)
5168 defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
5169
5170 // Scalar Reduce Minimum Pairwise (Floating Point)
5171 defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
5172
5173 // Scalar Reduce maxNum Pairwise (Floating Point)
5174 defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
5175
5176 // Scalar Reduce minNum Pairwise (Floating Point)
5177 defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
5178
5179 multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnodeS,
5180                                             SDPatternOperator opnodeD,
5181                                             Instruction INSTS,
5182                                             Instruction INSTD> {
5183   def : Pat<(v1f32 (opnodeS (v2f32 VPR64:$Rn))),
5184             (INSTS VPR64:$Rn)>;
5185   def : Pat<(v1f64 (opnodeD (v2f64 VPR128:$Rn))),
5186             (INSTD VPR128:$Rn)>;
5187 }
5188
5189 // Patterns to match llvm.aarch64.* intrinsic for
5190 // Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
5191 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
5192   int_aarch64_neon_vpfaddq, FADDPvv_S_2S, FADDPvv_D_2D>;
5193
5194 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
5195   int_aarch64_neon_vpmaxq, FMAXPvv_S_2S, FMAXPvv_D_2D>;
5196
5197 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
5198   int_aarch64_neon_vpminq, FMINPvv_S_2S, FMINPvv_D_2D>;
5199
5200 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
5201   int_aarch64_neon_vpfmaxnmq, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
5202
5203 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm, 
5204   int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
5205
5206
5207 // Scalar by element Arithmetic
5208
5209 class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,
5210                                     string rmlane, bit u, bit szhi, bit szlo,
5211                                     RegisterClass ResFPR, RegisterClass OpFPR,
5212                                     RegisterOperand OpVPR, Operand OpImm>
5213   : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
5214                              (outs ResFPR:$Rd),
5215                              (ins OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
5216                              asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
5217                              [],
5218                              NoItinerary> {
5219   bits<3> Imm;
5220   bits<5> MRm;
5221 }
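// Illustrative note (comment only): the class above gives by-element scalar
// forms such as "fmul s0, s1, v2.s[3]" (see FMULssv_4S below); the lane index
// $Imm is re-encoded into the h/l (and, for .h lanes, m) bits that each
// instruction def sets explicitly.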
5222
5223 class NeonI_ScalarXIndexedElemArith_Constraint_Impl<string asmop, bits<4> opcode,
5224                                                     string rmlane,
5225                                                     bit u, bit szhi, bit szlo,
5226                                                     RegisterClass ResFPR,
5227                                                     RegisterClass OpFPR,
5228                                                     RegisterOperand OpVPR,
5229                                                     Operand OpImm>
5230   : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
5231                              (outs ResFPR:$Rd),
5232                              (ins ResFPR:$src, OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
5233                              asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
5234                              [],
5235                              NoItinerary> {
5236   let Constraints = "$src = $Rd";
5237   bits<3> Imm;
5238   bits<5> MRm;
5239 }
5240
5241 // Scalar Floating Point multiply (scalar, by element)
5242 def FMULssv_4S : NeonI_ScalarXIndexedElemArith<"fmul",
5243   0b1001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5244   let Inst{11} = Imm{1}; // h
5245   let Inst{21} = Imm{0}; // l
5246   let Inst{20-16} = MRm;
5247 }
5248 def FMULddv_2D : NeonI_ScalarXIndexedElemArith<"fmul",
5249   0b1001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5250   let Inst{11} = Imm{0}; // h
5251   let Inst{21} = 0b0;    // l
5252   let Inst{20-16} = MRm;
5253 }
5254
5255 // Scalar Floating Point multiply extended (scalar, by element)
5256 def FMULXssv_4S : NeonI_ScalarXIndexedElemArith<"fmulx",
5257   0b1001, ".s", 0b1, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5258   let Inst{11} = Imm{1}; // h
5259   let Inst{21} = Imm{0}; // l
5260   let Inst{20-16} = MRm;
5261 }
5262 def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
5263   0b1001, ".d", 0b1, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5264   let Inst{11} = Imm{0}; // h
5265   let Inst{21} = 0b0;    // l
5266   let Inst{20-16} = MRm;
5267 }
5268
5269 multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
5270   SDPatternOperator opnode,
5271   Instruction INST,
5272   ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
5273   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
5274
5275   def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
5276                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
5277              (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5278
5279   def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
5280                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
5281              (ResTy (INST (ResTy FPRC:$Rn),
5282                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5283                OpNImm:$Imm))>;
5284
5285   // swapped operands
5286   def  : Pat<(ResTy (opnode
5287                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5288                (ResTy FPRC:$Rn))),
5289              (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5290
5291   def  : Pat<(ResTy (opnode
5292                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5293                (ResTy FPRC:$Rn))),
5294              (ResTy (INST (ResTy FPRC:$Rn),
5295                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5296                OpNImm:$Imm))>;
5297 }
5298
5299 // Patterns for Scalar Floating Point multiply (scalar, by element)
5300 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
5301   f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
5302 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
5303   f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
5304
5305 // Patterns for Scalar Floating Point multiply extended (scalar, by element)
5306 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
5307   FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare,
5308   v2f32, v4f32, neon_uimm1_bare>;
5309 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
5310   FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare,
5311   v1f64, v2f64, neon_uimm0_bare>;
5312
5313
5314 // Scalar Floating Point fused multiply-add (scalar, by element)
5315 def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
5316   0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5317   let Inst{11} = Imm{1}; // h
5318   let Inst{21} = Imm{0}; // l
5319   let Inst{20-16} = MRm;
5320 }
5321 def FMLAddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
5322   0b0001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5323   let Inst{11} = Imm{0}; // h
5324   let Inst{21} = 0b0;    // l
5325   let Inst{20-16} = MRm;
5326 }
5327
5328 // Scalar Floating Point fused multiply-subtract (scalar, by element)
5329 def FMLSssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
5330   0b0101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5331   let Inst{11} = Imm{1}; // h
5332   let Inst{21} = Imm{0}; // l
5333   let Inst{20-16} = MRm;
5334 }
5335 def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
5336   0b0101, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
5337   let Inst{11} = Imm{0}; // h
5338   let Inst{21} = 0b0;    // l
5339   let Inst{20-16} = MRm;
5340 }
5341 // We are allowed to match the fma instruction regardless of compile options.
5342 multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
5343   Instruction FMLAI, Instruction FMLSI,
5344   ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
5345   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
5346   // fmla
5347   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5348                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5349                (ResTy FPRC:$Ra))),
5350              (ResTy (FMLAI (ResTy FPRC:$Ra),
5351                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5352
5353   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5354                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5355                (ResTy FPRC:$Ra))),
5356              (ResTy (FMLAI (ResTy FPRC:$Ra),
5357                (ResTy FPRC:$Rn),
5358                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5359                OpNImm:$Imm))>;
5360
5361   // swapped fmla operands
5362   def  : Pat<(ResTy (fma
5363                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
5364                (ResTy FPRC:$Rn),
5365                (ResTy FPRC:$Ra))),
5366              (ResTy (FMLAI (ResTy FPRC:$Ra),
5367                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5368
5369   def  : Pat<(ResTy (fma
5370                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
5371                (ResTy FPRC:$Rn),
5372                (ResTy FPRC:$Ra))),
5373              (ResTy (FMLAI (ResTy FPRC:$Ra),
5374                (ResTy FPRC:$Rn),
5375                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5376                OpNImm:$Imm))>;
5377
5378   // fmls
5379   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5380                (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
5381                (ResTy FPRC:$Ra))),
5382              (ResTy (FMLSI (ResTy FPRC:$Ra),
5383                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5384
5385   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
5386                (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
5387                (ResTy FPRC:$Ra))),
5388              (ResTy (FMLSI (ResTy FPRC:$Ra),
5389                (ResTy FPRC:$Rn),
5390                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5391                OpNImm:$Imm))>;
5392
5393   // swapped fmls operands
5394   def  : Pat<(ResTy (fma
5395                (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
5396                (ResTy FPRC:$Rn),
5397                (ResTy FPRC:$Ra))),
5398              (ResTy (FMLSI (ResTy FPRC:$Ra),
5399                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
5400
5401   def  : Pat<(ResTy (fma
5402                (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
5403                (ResTy FPRC:$Rn),
5404                (ResTy FPRC:$Ra))),
5405              (ResTy (FMLSI (ResTy FPRC:$Ra),
5406                (ResTy FPRC:$Rn),
5407                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
5408                OpNImm:$Imm))>;
5409 }
5410
5411 // Scalar Floating Point fused multiply-add and
5412 // multiply-subtract (scalar, by element)
5413 defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
5414   f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
5415 defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
5416   f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
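// Illustrative example (comment only): with the instantiations above, IR such as
//   %r = call double @llvm.fma.f64(double %n, double %e, double %a)
// where %e is a lane extracted from a vector register is selected to
// "fmla d0, d1, v2.d[0]" (FMLAddv_2D); if the extracted multiplicand is
// negated via fneg, the corresponding fmls form is selected instead.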
5419
5420 // Scalar Signed saturating doubling multiply long (scalar, by element)
5421 def SQDMULLshv_4H : NeonI_ScalarXIndexedElemArith<"sqdmull",
5422   0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
5423   let Inst{11} = 0b0; // h
5424   let Inst{21} = Imm{1}; // l
5425   let Inst{20} = Imm{0}; // m
5426   let Inst{19-16} = MRm{3-0};
5427 }
5428 def SQDMULLshv_8H : NeonI_ScalarXIndexedElemArith<"sqdmull",
5429   0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
5430   let Inst{11} = Imm{2}; // h
5431   let Inst{21} = Imm{1}; // l
5432   let Inst{20} = Imm{0}; // m
5433   let Inst{19-16} = MRm{3-0};
5434 }
5435 def SQDMULLdsv_2S : NeonI_ScalarXIndexedElemArith<"sqdmull",
5436   0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
5437   let Inst{11} = 0b0;    // h
5438   let Inst{21} = Imm{0}; // l
5439   let Inst{20-16} = MRm;
5440 }
5441 def SQDMULLdsv_4S : NeonI_ScalarXIndexedElemArith<"sqdmull",
5442   0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
5443   let Inst{11} = Imm{1};    // h
5444   let Inst{21} = Imm{0};    // l
5445   let Inst{20-16} = MRm;
5446 }
5447
5448 multiclass Neon_ScalarXIndexedElem_MUL_Patterns<
5449   SDPatternOperator opnode,
5450   Instruction INST,
5451   ValueType ResTy, RegisterClass FPRC,
5452   ValueType OpVTy, ValueType OpTy,
5453   ValueType VecOpTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
5454
5455   def  : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
5456                (OpVTy (scalar_to_vector
5457                  (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))))),
5458              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
5459
5460   // swapped operands
5461   def  : Pat<(ResTy (opnode
5462                (OpVTy (scalar_to_vector
5463                  (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
5464                  (OpVTy FPRC:$Rn))),
5465              (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
5466 }
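// Illustrative note (comment only): the patterns below fold a lane extract
// feeding the saturating doubling multiply into the by-element form, so that
// e.g. llvm.arm.neon.vqdmull of a v1i16 operand and lane 3 of a v4i16 vector
// is selected as "sqdmull s0, h1, v2.h[3]" (SQDMULLshv_4H).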
5467
5468
5469 // Patterns for Scalar Signed saturating doubling
5470 // multiply long (scalar, by element)
5471 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
5472   SQDMULLshv_4H, v1i32, FPR16, v1i16, i16, v4i16,
5473   i32, VPR64Lo, neon_uimm2_bare>;
5474 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
5475   SQDMULLshv_8H, v1i32, FPR16, v1i16, i16, v8i16,
5476   i32, VPR128Lo, neon_uimm3_bare>;
5477 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
5478   SQDMULLdsv_2S, v1i64, FPR32, v1i32, i32, v2i32,
5479   i32, VPR64Lo, neon_uimm1_bare>;
5480 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
5481   SQDMULLdsv_4S, v1i64, FPR32, v1i32, i32, v4i32,
5482   i32, VPR128Lo, neon_uimm2_bare>;
5483
5484 // Scalar Signed saturating doubling multiply-add long (scalar, by element)
5485 def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
5486   0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
5487   let Inst{11} = 0b0; // h
5488   let Inst{21} = Imm{1}; // l
5489   let Inst{20} = Imm{0}; // m
5490   let Inst{19-16} = MRm{3-0};
5491 }
5492 def SQDMLALshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
5493   0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
5494   let Inst{11} = Imm{2}; // h
5495   let Inst{21} = Imm{1}; // l
5496   let Inst{20} = Imm{0}; // m
5497   let Inst{19-16} = MRm{3-0};
5498 }
5499 def SQDMLALdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
5500   0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
5501   let Inst{11} = 0b0;    // h
5502   let Inst{21} = Imm{0}; // l
5503   let Inst{20-16} = MRm;
5504 }
5505 def SQDMLALdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
5506   0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
5507   let Inst{11} = Imm{1};    // h
5508   let Inst{21} = Imm{0};    // l
5509   let Inst{20-16} = MRm;
5510 }
5511
5512 // Scalar Signed saturating doubling
5513 // multiply-subtract long (scalar, by element)
5514 def SQDMLSLshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
5515   0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
5516   let Inst{11} = 0b0; // h
5517   let Inst{21} = Imm{1}; // l
5518   let Inst{20} = Imm{0}; // m
5519   let Inst{19-16} = MRm{3-0};
5520 }
5521 def SQDMLSLshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
5522   0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
5523   let Inst{11} = Imm{2}; // h
5524   let Inst{21} = Imm{1}; // l
5525   let Inst{20} = Imm{0}; // m
5526   let Inst{19-16} = MRm{3-0};
5527 }
5528 def SQDMLSLdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
5529   0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
5530   let Inst{11} = 0b0;    // h
5531   let Inst{21} = Imm{0}; // l
5532   let Inst{20-16} = MRm;
5533 }
5534 def SQDMLSLdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
5535   0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
5536   let Inst{11} = Imm{1};    // h
5537   let Inst{21} = Imm{0};    // l
5538   let Inst{20-16} = MRm;
5539 }
5540
5541 multiclass Neon_ScalarXIndexedElem_MLAL_Patterns<
5542   SDPatternOperator opnode,
5543   SDPatternOperator coreopnode,
5544   Instruction INST,
5545   ValueType ResTy, RegisterClass ResFPRC, RegisterClass FPRC,
5546   ValueType OpTy,
5547   ValueType OpVTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
5548
5549   def  : Pat<(ResTy (opnode
5550                (ResTy ResFPRC:$Ra),
5551                (ResTy (coreopnode (OpTy FPRC:$Rn),
5552                  (OpTy (scalar_to_vector
5553                    (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))))))),
5554              (ResTy (INST (ResTy ResFPRC:$Ra),
5555                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
5556
5557   // swapped operands
5558   def  : Pat<(ResTy (opnode
5559                (ResTy ResFPRC:$Ra),
5560                (ResTy (coreopnode
5561                  (OpTy (scalar_to_vector
5562                    (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))),
5563                  (OpTy FPRC:$Rn))))),
5564              (ResTy (INST (ResTy ResFPRC:$Ra),
5565                (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
5566 }
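// Illustrative note (comment only): the multiclass above merges the outer
// saturating accumulate intrinsic with an inner llvm.arm.neon.vqdmull whose
// second operand is a vector lane, so a vqadds-of-vqdmull chain on v1i16 data
// can be selected as a single "sqdmlal s0, h1, v2.h[3]" (SQDMLALshv_4H).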
5567
5568 // Patterns for Scalar Signed saturating
5569 // doubling multiply-add long (scalar, by element)
5570 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
5571   int_arm_neon_vqdmull, SQDMLALshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
5572   i32, VPR64Lo, neon_uimm2_bare>;
5573 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
5574   int_arm_neon_vqdmull, SQDMLALshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
5575   i32, VPR128Lo, neon_uimm3_bare>;
5576 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
5577   int_arm_neon_vqdmull, SQDMLALdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
5578   i32, VPR64Lo, neon_uimm1_bare>;
5579 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
5580   int_arm_neon_vqdmull, SQDMLALdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
5581   i32, VPR128Lo, neon_uimm2_bare>;
5582
5583 // Patterns for Scalar Signed saturating
5584 // doubling multiply-sub long (scalar, by element)
5585 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
5586   int_arm_neon_vqdmull, SQDMLSLshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
5587   i32, VPR64Lo, neon_uimm2_bare>;
5588 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
5589   int_arm_neon_vqdmull, SQDMLSLshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
5590   i32, VPR128Lo, neon_uimm3_bare>;
5591 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
5592   int_arm_neon_vqdmull, SQDMLSLdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
5593   i32, VPR64Lo, neon_uimm1_bare>;
5594 defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
5595   int_arm_neon_vqdmull, SQDMLSLdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
5596   i32, VPR128Lo, neon_uimm2_bare>;
5597
5598
5599 // Scalar Signed saturating doubling multiply returning
5600 // high half (scalar, by element)
5601 def SQDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
5602   0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
5603   let Inst{11} = 0b0; // h
5604   let Inst{21} = Imm{1}; // l
5605   let Inst{20} = Imm{0}; // m
5606   let Inst{19-16} = MRm{3-0};
5607 }
5608 def SQDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
5609   0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
5610   let Inst{11} = Imm{2}; // h
5611   let Inst{21} = Imm{1}; // l
5612   let Inst{20} = Imm{0}; // m
5613   let Inst{19-16} = MRm{3-0};
5614 }
5615 def SQDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
5616   0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
5617   let Inst{11} = 0b0;    // h
5618   let Inst{21} = Imm{0}; // l
5619   let Inst{20-16} = MRm;
5620 }
5621 def SQDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
5622   0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5623   let Inst{11} = Imm{1};    // h
5624   let Inst{21} = Imm{0};    // l
5625   let Inst{20-16} = MRm;
5626 }
5627
5628 // Patterns for Scalar Signed saturating doubling multiply returning
5629 // high half (scalar, by element)
5630 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
5631   SQDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16,
5632   i32, VPR64Lo, neon_uimm2_bare>;
5633 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
5634   SQDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16,
5635   i32, VPR128Lo, neon_uimm3_bare>;
5636 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
5637   SQDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32,
5638   i32, VPR64Lo, neon_uimm1_bare>;
5639 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
5640   SQDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32,
5641   i32, VPR128Lo, neon_uimm2_bare>;
5642
5643 // Scalar Signed saturating rounding doubling multiply
5644 // returning high half (scalar, by element)
5645 def SQRDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
5646   0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
5647   let Inst{11} = 0b0; // h
5648   let Inst{21} = Imm{1}; // l
5649   let Inst{20} = Imm{0}; // m
5650   let Inst{19-16} = MRm{3-0};
5651 }
5652 def SQRDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
5653   0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
5654   let Inst{11} = Imm{2}; // h
5655   let Inst{21} = Imm{1}; // l
5656   let Inst{20} = Imm{0}; // m
5657   let Inst{19-16} = MRm{3-0};
5658 }
5659 def SQRDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
5660   0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
5661   let Inst{11} = 0b0;    // h
5662   let Inst{21} = Imm{0}; // l
5663   let Inst{20-16} = MRm;
5664 }
5665 def SQRDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
5666   0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5667   let Inst{11} = Imm{1};    // h
5668   let Inst{21} = Imm{0};    // l
5669   let Inst{20-16} = MRm;
5670 }
5671
5672 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
5673   SQRDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16, i32,
5674   VPR64Lo, neon_uimm2_bare>;
5675 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
5676   SQRDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16, i32,
5677   VPR128Lo, neon_uimm3_bare>;
5678 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
5679   SQRDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32, i32,
5680   VPR64Lo, neon_uimm1_bare>;
5681 defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
5682   SQRDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32, i32,
5683   VPR128Lo, neon_uimm2_bare>;
5684
5685 // Scalar Copy - DUP element to scalar
5686 class NeonI_Scalar_DUP<string asmop, string asmlane,
5687                        RegisterClass ResRC, RegisterOperand VPRC,
5688                        Operand OpImm>
5689   : NeonI_ScalarCopy<(outs ResRC:$Rd), (ins VPRC:$Rn, OpImm:$Imm),
5690                      asmop # "\t$Rd, $Rn." # asmlane # "[$Imm]",
5691                      [],
5692                      NoItinerary> {
5693   bits<4> Imm;
5694 }
5695
5696 def DUPbv_B : NeonI_Scalar_DUP<"dup", "b", FPR8, VPR128, neon_uimm4_bare> {
5697   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
5698 }
5699 def DUPhv_H : NeonI_Scalar_DUP<"dup", "h", FPR16, VPR128, neon_uimm3_bare> {
5700   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
5701 }
5702 def DUPsv_S : NeonI_Scalar_DUP<"dup", "s", FPR32, VPR128, neon_uimm2_bare> {
5703   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
5704 }
5705 def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
5706   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
5707 }
5708
5709 multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
5710   ValueType OpTy, Operand OpImm,
5711   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
5712   def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
5713             (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
5714
5715   def : Pat<(ResTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
5716             (ResTy (DUPI
5717               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
5718                 OpNImm:$Imm))>;
5719 }
5720
5721 multiclass NeonI_SDUP<PatFrag GetLow, PatFrag GetHigh,
5722                  ValueType ResTy, ValueType OpTy> {
5723   def : Pat<(ResTy (GetLow VPR128:$Rn)),
5724             (ResTy (EXTRACT_SUBREG (OpTy VPR128:$Rn), sub_64))>;
5725   def : Pat<(ResTy (GetHigh VPR128:$Rn)),
5726             (ResTy (DUPdv_D (OpTy VPR128:$Rn), 1))>;
5727 }
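// Illustrative note (comment only): for the v1i64/v2i64 instantiation below,
// taking the low half of a Q register is just a sub_64 subregister extract,
// while taking the high half is selected as "dup d0, v1.d[1]" via DUPdv_D.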
5728
5729 defm : NeonI_SDUP<Neon_Low16B, Neon_High16B, v8i8, v16i8>;
5730 defm : NeonI_SDUP<Neon_Low8H, Neon_High8H, v4i16, v8i16>;
5731 defm : NeonI_SDUP<Neon_Low4S, Neon_High4S, v2i32, v4i32>;
5732 defm : NeonI_SDUP<Neon_Low2D, Neon_High2D, v1i64, v2i64>;
5733 defm : NeonI_SDUP<Neon_Low4float, Neon_High4float, v2f32, v4f32>;
5734 defm : NeonI_SDUP<Neon_Low2double, Neon_High2double, v1f64, v2f64>;
5735
5736 // Patterns for vector extract of FP data using scalar DUP instructions
5737 defm : NeonI_Scalar_DUP_Elt_pattern<DUPsv_S, f32,
5738   v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
5739 defm : NeonI_Scalar_DUP_Elt_pattern<DUPdv_D, f64,
5740   v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
5741
5742 multiclass NeonI_Scalar_DUP_Vec_pattern<Instruction DUPI,
5743   ValueType ResTy, ValueType OpTy,Operand OpLImm,
5744   ValueType NOpTy, ValueType ExTy, Operand OpNImm> {
5745
5746   def : Pat<(ResTy (extract_subvector (OpTy VPR128:$Rn), OpLImm:$Imm)),
5747             (ResTy (DUPI VPR128:$Rn, OpLImm:$Imm))>;
5748
5749   def : Pat<(ResTy (extract_subvector (NOpTy VPR64:$Rn), OpNImm:$Imm)),
5750             (ResTy (DUPI
5751               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
5752                 OpNImm:$Imm))>;
5753 }
5754 // Patterns for extracting v1ix subvectors using scalar DUP instructions
5755 defm : NeonI_Scalar_DUP_Vec_pattern<DUPbv_B,
5756   v1i8, v16i8, neon_uimm4_bare, v8i8, v16i8, neon_uimm3_bare>;
5757 defm : NeonI_Scalar_DUP_Vec_pattern<DUPhv_H,
5758   v1i16, v8i16, neon_uimm3_bare, v4i16, v8i16, neon_uimm2_bare>;
5759 defm : NeonI_Scalar_DUP_Vec_pattern<DUPsv_S,
5760   v1i32, v4i32, neon_uimm2_bare, v2i32, v4i32, neon_uimm1_bare>;
5761
5762
5763 multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
5764                                   Instruction DUPI, Operand OpImm,
5765                                   RegisterClass ResRC> {
5766   def : NeonInstAlias<!strconcat(asmop, "$Rd, $Rn" # asmlane # "[$Imm]"),
5767           (DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
5768 }
5769
5770 // Aliases for Scalar copy - DUP element (scalar)
5771 // FIXME: This is actually the preferred syntax but TableGen can't deal with
5772 // custom printing of aliases.
5773 defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
5774 defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
5775 defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
5776 defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
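// Illustrative note (comment only): with the aliases above the assembler also
// accepts spellings such as "mov s0, v1.s[2]" for "dup s0, v1.s[2]"; the
// aliases are parse-only (the final 0b0 argument), so the dup form is still
// used for printing.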
5777
5778
5779 //===----------------------------------------------------------------------===//
5780 // Non-Instruction Patterns
5781 //===----------------------------------------------------------------------===//
5782
5783 // 64-bit vector bitcasts...
5784
5785 def : Pat<(v1i64 (bitconvert (v8i8  VPR64:$src))), (v1i64 VPR64:$src)>;
5786 def : Pat<(v2f32 (bitconvert (v8i8  VPR64:$src))), (v2f32 VPR64:$src)>;
5787 def : Pat<(v2i32 (bitconvert (v8i8  VPR64:$src))), (v2i32 VPR64:$src)>;
5788 def : Pat<(v4i16 (bitconvert (v8i8  VPR64:$src))), (v4i16 VPR64:$src)>;
5789
5790 def : Pat<(v1i64 (bitconvert (v4i16  VPR64:$src))), (v1i64 VPR64:$src)>;
5791 def : Pat<(v2i32 (bitconvert (v4i16  VPR64:$src))), (v2i32 VPR64:$src)>;
5792 def : Pat<(v2f32 (bitconvert (v4i16  VPR64:$src))), (v2f32 VPR64:$src)>;
5793 def : Pat<(v8i8  (bitconvert (v4i16  VPR64:$src))), (v8i8 VPR64:$src)>;
5794
5795 def : Pat<(v1i64 (bitconvert (v2i32  VPR64:$src))), (v1i64 VPR64:$src)>;
5796 def : Pat<(v2f32 (bitconvert (v2i32  VPR64:$src))), (v2f32 VPR64:$src)>;
5797 def : Pat<(v4i16 (bitconvert (v2i32  VPR64:$src))), (v4i16 VPR64:$src)>;
5798 def : Pat<(v8i8  (bitconvert (v2i32  VPR64:$src))), (v8i8 VPR64:$src)>;
5799
5800 def : Pat<(v1i64 (bitconvert (v2f32  VPR64:$src))), (v1i64 VPR64:$src)>;
5801 def : Pat<(v2i32 (bitconvert (v2f32  VPR64:$src))), (v2i32 VPR64:$src)>;
5802 def : Pat<(v4i16 (bitconvert (v2f32  VPR64:$src))), (v4i16 VPR64:$src)>;
5803 def : Pat<(v8i8  (bitconvert (v2f32  VPR64:$src))), (v8i8 VPR64:$src)>;
5804
5805 def : Pat<(v2f32 (bitconvert (v1i64  VPR64:$src))), (v2f32 VPR64:$src)>;
5806 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
5807 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
5808 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
5809
5810 // ..and 128-bit vector bitcasts...
5811
5812 def : Pat<(v2f64 (bitconvert (v16i8  VPR128:$src))), (v2f64 VPR128:$src)>;
5813 def : Pat<(v2i64 (bitconvert (v16i8  VPR128:$src))), (v2i64 VPR128:$src)>;
5814 def : Pat<(v4f32 (bitconvert (v16i8  VPR128:$src))), (v4f32 VPR128:$src)>;
5815 def : Pat<(v4i32 (bitconvert (v16i8  VPR128:$src))), (v4i32 VPR128:$src)>;
5816 def : Pat<(v8i16 (bitconvert (v16i8  VPR128:$src))), (v8i16 VPR128:$src)>;
5817
5818 def : Pat<(v2f64 (bitconvert (v8i16  VPR128:$src))), (v2f64 VPR128:$src)>;
5819 def : Pat<(v2i64 (bitconvert (v8i16  VPR128:$src))), (v2i64 VPR128:$src)>;
5820 def : Pat<(v4i32 (bitconvert (v8i16  VPR128:$src))), (v4i32 VPR128:$src)>;
5821 def : Pat<(v4f32 (bitconvert (v8i16  VPR128:$src))), (v4f32 VPR128:$src)>;
5822 def : Pat<(v16i8 (bitconvert (v8i16  VPR128:$src))), (v16i8 VPR128:$src)>;
5823
5824 def : Pat<(v2f64 (bitconvert (v4i32  VPR128:$src))), (v2f64 VPR128:$src)>;
5825 def : Pat<(v2i64 (bitconvert (v4i32  VPR128:$src))), (v2i64 VPR128:$src)>;
5826 def : Pat<(v4f32 (bitconvert (v4i32  VPR128:$src))), (v4f32 VPR128:$src)>;
5827 def : Pat<(v8i16 (bitconvert (v4i32  VPR128:$src))), (v8i16 VPR128:$src)>;
5828 def : Pat<(v16i8 (bitconvert (v4i32  VPR128:$src))), (v16i8 VPR128:$src)>;
5829
5830 def : Pat<(v2f64 (bitconvert (v4f32  VPR128:$src))), (v2f64 VPR128:$src)>;
5831 def : Pat<(v2i64 (bitconvert (v4f32  VPR128:$src))), (v2i64 VPR128:$src)>;
5832 def : Pat<(v4i32 (bitconvert (v4f32  VPR128:$src))), (v4i32 VPR128:$src)>;
5833 def : Pat<(v8i16 (bitconvert (v4f32  VPR128:$src))), (v8i16 VPR128:$src)>;
5834 def : Pat<(v16i8 (bitconvert (v4f32  VPR128:$src))), (v16i8 VPR128:$src)>;
5835
5836 def : Pat<(v2f64 (bitconvert (v2i64  VPR128:$src))), (v2f64 VPR128:$src)>;
5837 def : Pat<(v4f32 (bitconvert (v2i64  VPR128:$src))), (v4f32 VPR128:$src)>;
5838 def : Pat<(v4i32 (bitconvert (v2i64  VPR128:$src))), (v4i32 VPR128:$src)>;
5839 def : Pat<(v8i16 (bitconvert (v2i64  VPR128:$src))), (v8i16 VPR128:$src)>;
5840 def : Pat<(v16i8 (bitconvert (v2i64  VPR128:$src))), (v16i8 VPR128:$src)>;
5841
5842 def : Pat<(v2i64 (bitconvert (v2f64  VPR128:$src))), (v2i64 VPR128:$src)>;
5843 def : Pat<(v4f32 (bitconvert (v2f64  VPR128:$src))), (v4f32 VPR128:$src)>;
5844 def : Pat<(v4i32 (bitconvert (v2f64  VPR128:$src))), (v4i32 VPR128:$src)>;
5845 def : Pat<(v8i16 (bitconvert (v2f64  VPR128:$src))), (v8i16 VPR128:$src)>;
5846 def : Pat<(v16i8 (bitconvert (v2f64  VPR128:$src))), (v16i8 VPR128:$src)>;
5847
5848
5849 // ...and scalar bitcasts...
5850 def : Pat<(f16 (bitconvert (v1i16  FPR16:$src))), (f16 FPR16:$src)>;
5851 def : Pat<(f32 (bitconvert (v1i32  FPR32:$src))), (f32 FPR32:$src)>;
5852 def : Pat<(f64 (bitconvert (v1i64  FPR64:$src))), (f64 FPR64:$src)>;
5853 def : Pat<(f32 (bitconvert (v1f32  FPR32:$src))), (f32 FPR32:$src)>;
5854 def : Pat<(f64 (bitconvert (v1f64  FPR64:$src))), (f64 FPR64:$src)>;
5855
5856 def : Pat<(i64 (bitconvert (v1i64  FPR64:$src))), (FMOVxd $src)>;
5857 def : Pat<(i64 (bitconvert (v1f64  FPR64:$src))), (FMOVxd $src)>;
5858 def : Pat<(i64 (bitconvert (v2i32  FPR64:$src))), (FMOVxd $src)>;
5859 def : Pat<(i64 (bitconvert (v2f32  FPR64:$src))), (FMOVxd $src)>;
5860 def : Pat<(i64 (bitconvert (v4i16  FPR64:$src))), (FMOVxd $src)>;
5861 def : Pat<(i64 (bitconvert (v8i8  FPR64:$src))), (FMOVxd $src)>;
5862
5863 def : Pat<(i32 (bitconvert (v1i32  FPR32:$src))), (FMOVws $src)>;
5864
5865 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
5866 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
5867 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
5868
5869 def : Pat<(f64   (bitconvert (v8i8  VPR64:$src))), (f64 VPR64:$src)>;
5870 def : Pat<(f64   (bitconvert (v4i16  VPR64:$src))), (f64 VPR64:$src)>;
5871 def : Pat<(f64   (bitconvert (v2i32  VPR64:$src))), (f64 VPR64:$src)>;
5872 def : Pat<(f64   (bitconvert (v2f32  VPR64:$src))), (f64 VPR64:$src)>;
5873 def : Pat<(f64   (bitconvert (v1i64  VPR64:$src))), (f64 VPR64:$src)>;
5874
5875 def : Pat<(f128  (bitconvert (v16i8  VPR128:$src))), (f128 VPR128:$src)>;
5876 def : Pat<(f128  (bitconvert (v8i16  VPR128:$src))), (f128 VPR128:$src)>;
5877 def : Pat<(f128  (bitconvert (v4i32  VPR128:$src))), (f128 VPR128:$src)>;
5878 def : Pat<(f128  (bitconvert (v2i64  VPR128:$src))), (f128 VPR128:$src)>;
5879 def : Pat<(f128  (bitconvert (v4f32  VPR128:$src))), (f128 VPR128:$src)>;
5880 def : Pat<(f128  (bitconvert (v2f64  VPR128:$src))), (f128 VPR128:$src)>;
5881
5882 def : Pat<(v1i16 (bitconvert (f16  FPR16:$src))), (v1i16 FPR16:$src)>;
5883 def : Pat<(v1i32 (bitconvert (f32  FPR32:$src))), (v1i32 FPR32:$src)>;
5884 def : Pat<(v1i64 (bitconvert (f64  FPR64:$src))), (v1i64 FPR64:$src)>;
5885 def : Pat<(v1f32 (bitconvert (f32  FPR32:$src))), (v1f32 FPR32:$src)>;
5886 def : Pat<(v1f64 (bitconvert (f64  FPR64:$src))), (v1f64 FPR64:$src)>;
5887
5888 def : Pat<(v1i64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
5889 def : Pat<(v1f64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
5890 def : Pat<(v2i32 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
5891 def : Pat<(v2f32 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
5892 def : Pat<(v4i16 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
5893 def : Pat<(v8i8 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
5894
5895 def : Pat<(v1i32 (bitconvert (i32  GPR32:$src))), (FMOVsw $src)>;
5896
5897 def : Pat<(v8i8   (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
5898 def : Pat<(v4i16  (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
5899 def : Pat<(v2i32  (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
5900 def : Pat<(v2f32  (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
5901 def : Pat<(v1i64  (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
5902
5903 def : Pat<(v16i8  (bitconvert (f128   FPR128:$src))), (v16i8 FPR128:$src)>;
5904 def : Pat<(v8i16  (bitconvert (f128   FPR128:$src))), (v8i16 FPR128:$src)>;
5905 def : Pat<(v4i32  (bitconvert (f128   FPR128:$src))), (v4i32 FPR128:$src)>;
5906 def : Pat<(v2i64  (bitconvert (f128   FPR128:$src))), (v2i64 FPR128:$src)>;
5907 def : Pat<(v4f32  (bitconvert (f128   FPR128:$src))), (v4f32 FPR128:$src)>;
5908 def : Pat<(v2f64  (bitconvert (f128   FPR128:$src))), (v2f64 FPR128:$src)>;
5909
5910 // Scalar Three Same
5911
5912 def neon_uimm3 : Operand<i64>,
5913                    ImmLeaf<i64, [{return Imm < 8;}]> {
5914   let ParserMatchClass = uimm3_asmoperand;
5915   let PrintMethod = "printUImmHexOperand";
5916 }
5917
5918 def neon_uimm4 : Operand<i64>,
5919                    ImmLeaf<i64, [{return Imm < 16;}]> {
5920   let ParserMatchClass = uimm4_asmoperand;
5921   let PrintMethod = "printUImmHexOperand";
5922 }
5923
5924 // Bitwise Extract
5925 class NeonI_Extract<bit q, bits<2> op2, string asmop,
5926                     string OpS, RegisterOperand OpVPR, Operand OpImm>
5927   : NeonI_BitExtract<q, op2, (outs OpVPR:$Rd),
5928                      (ins OpVPR:$Rn, OpVPR:$Rm, OpImm:$Index),
5929                      asmop # "\t$Rd." # OpS # ", $Rn." # OpS # 
5930                      ", $Rm." # OpS # ", $Index",
5931                      [],
5932                      NoItinerary>{
5933   bits<4> Index;
5934 }
5935
5936 def EXTvvvi_8b : NeonI_Extract<0b0, 0b00, "ext", "8b",
5937                                VPR64, neon_uimm3> {
5938   let Inst{14-11} = {0b0, Index{2}, Index{1}, Index{0}};
5939 }
5940
5941 def EXTvvvi_16b: NeonI_Extract<0b1, 0b00, "ext", "16b",
5942                                VPR128, neon_uimm4> {
5943   let Inst{14-11} = Index;
5944 }
5945
5946 class NI_Extract<ValueType OpTy, RegisterOperand OpVPR, Instruction INST,
5947                  Operand OpImm> 
5948   : Pat<(OpTy (Neon_vextract (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm),
5949                                  (i64 OpImm:$Imm))),
5950               (INST OpVPR:$Rn, OpVPR:$Rm, OpImm:$Imm)>;
5951
5952 def : NI_Extract<v8i8,  VPR64,  EXTvvvi_8b,  neon_uimm3>;
5953 def : NI_Extract<v4i16, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5954 def : NI_Extract<v2i32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5955 def : NI_Extract<v1i64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5956 def : NI_Extract<v2f32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5957 def : NI_Extract<v1f64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5958 def : NI_Extract<v16i8, VPR128, EXTvvvi_16b, neon_uimm4>;
5959 def : NI_Extract<v8i16, VPR128, EXTvvvi_16b, neon_uimm4>;
5960 def : NI_Extract<v4i32, VPR128, EXTvvvi_16b, neon_uimm4>;
5961 def : NI_Extract<v2i64, VPR128, EXTvvvi_16b, neon_uimm4>;
5962 def : NI_Extract<v4f32, VPR128, EXTvvvi_16b, neon_uimm4>;
5963 def : NI_Extract<v2f64, VPR128, EXTvvvi_16b, neon_uimm4>;
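
// Illustrative only: a small ACLE example (assuming Clang's arm_neon.h) that
// is expected to select EXTvvvi_8b through the NI_Extract pattern above. The
// byte index must be a constant in [0,7] for the 8b form (neon_uimm3) and in
// [0,15] for the 16b form (neon_uimm4):
//
//   #include <arm_neon.h>
//   int8x8_t shift_in(int8x8_t lo, int8x8_t hi) {
//     return vext_s8(lo, hi, 3);   // ext v0.8b, v0.8b, v1.8b, #3
//   }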
5964
5965 // Table lookup
5966 class NI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
5967              string asmop, string OpS, RegisterOperand OpVPR,
5968              RegisterOperand VecList>
5969   : NeonI_TBL<q, op2, len, op,
5970               (outs OpVPR:$Rd), (ins VecList:$Rn, OpVPR:$Rm),
5971               asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
5972               [],
5973               NoItinerary>;
5974
5975 // The vectors in the lookup table are always 16b
5976 multiclass NI_TBL_pat<bits<2> len, bit op, string asmop, string List> {
5977   def _8b  : NI_TBL<0, 0b00, len, op, asmop, "8b", VPR64,
5978                     !cast<RegisterOperand>(List # "16B_operand")>;
5979
5980   def _16b : NI_TBL<1, 0b00, len, op, asmop, "16b", VPR128,
5981                     !cast<RegisterOperand>(List # "16B_operand")>;
5982 }
5983
5984 defm TBL1 : NI_TBL_pat<0b00, 0b0, "tbl", "VOne">;
5985 defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
5986 defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
5987 defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
5988
5989 // Table lookup extension
5990 class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
5991              string asmop, string OpS, RegisterOperand OpVPR,
5992              RegisterOperand VecList>
5993   : NeonI_TBL<q, op2, len, op,
5994               (outs OpVPR:$Rd), (ins OpVPR:$src, VecList:$Rn, OpVPR:$Rm),
5995               asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
5996               [],
5997               NoItinerary> {
5998   let Constraints = "$src = $Rd";
5999 }
6000
6001 // The vectors in the lookup table are always 16b
6002 multiclass NI_TBX_pat<bits<2> len, bit op, string asmop, string List> {
6003   def _8b  : NI_TBX<0, 0b00, len, op, asmop, "8b", VPR64,
6004                     !cast<RegisterOperand>(List # "16B_operand")>;
6005
6006   def _16b : NI_TBX<1, 0b00, len, op, asmop, "16b", VPR128,
6007                     !cast<RegisterOperand>(List # "16B_operand")>;
6008 }
6009
6010 defm TBX1 : NI_TBX_pat<0b00, 0b1, "tbx", "VOne">;
6011 defm TBX2 : NI_TBX_pat<0b01, 0b1, "tbx", "VPair">;
6012 defm TBX3 : NI_TBX_pat<0b10, 0b1, "tbx", "VTriple">;
6013 defm TBX4 : NI_TBX_pat<0b11, 0b1, "tbx", "VQuad">;
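
// Illustrative only: the AArch64 ACLE single-table forms (assuming Clang's
// arm_neon.h) map onto TBL1/TBX1 above; an out-of-range index yields 0 for
// tbl and leaves the destination element unchanged for tbx:
//
//   #include <arm_neon.h>
//   uint8x16_t lookup(uint8x16_t table, uint8x16_t idx) {
//     return vqtbl1q_u8(table, idx);          // tbl v0.16b, { v0.16b }, v1.16b
//   }
//   uint8x16_t lookup_keep(uint8x16_t r, uint8x16_t table, uint8x16_t idx) {
//     return vqtbx1q_u8(r, table, idx);       // tbx v0.16b, { v1.16b }, v2.16b
//   }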
6014
6015 // The following definitions are for the instruction class (3V Elem)
6016
6017 // Variant 1
6018
6019 class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
6020              string asmop, string ResS, string OpS, string EleOpS,
6021              Operand OpImm, RegisterOperand ResVPR,
6022              RegisterOperand OpVPR, RegisterOperand EleOpVPR>
6023   : NeonI_2VElem<q, u, size, opcode, 
6024                  (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
6025                                          EleOpVPR:$Re, OpImm:$Index),
6026                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
6027                  ", $Re." # EleOpS # "[$Index]",
6028                  [],
6029                  NoItinerary> {
6030   bits<3> Index;
6031   bits<5> Re;
6032
6033   let Constraints = "$src = $Rd";
6034 }
6035
6036 multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop> {
6037   // vector register class for element is always 128-bit to cover the max index
6038   def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
6039                      neon_uimm2_bare, VPR64, VPR64, VPR128> {
6040     let Inst{11} = {Index{1}};
6041     let Inst{21} = {Index{0}};
6042     let Inst{20-16} = Re;
6043   }
6044
6045   def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
6046                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
6047     let Inst{11} = {Index{1}};
6048     let Inst{21} = {Index{0}};
6049     let Inst{20-16} = Re;
6050   }
6051
6052   // Index operations on 16-bit(H) elements are restricted to using v0-v15.
6053   def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
6054                      neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
6055     let Inst{11} = {Index{2}};
6056     let Inst{21} = {Index{1}};
6057     let Inst{20} = {Index{0}};
6058     let Inst{19-16} = Re{3-0};
6059   }
6060
6061   def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
6062                      neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
6063     let Inst{11} = {Index{2}};
6064     let Inst{21} = {Index{1}};
6065     let Inst{20} = {Index{0}};
6066     let Inst{19-16} = Re{3-0};
6067   }
6068 }
6069
6070 defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
6071 defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
6072
6073 // Pattern for lane in 128-bit vector
6074 class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
6075                    RegisterOperand ResVPR, RegisterOperand OpVPR,
6076                    RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
6077                    ValueType EleOpTy>
6078   : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
6079           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6080         (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
6081
6082 // Pattern for lane in 64-bit vector
6083 class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
6084                   RegisterOperand ResVPR, RegisterOperand OpVPR,
6085                   RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
6086                   ValueType EleOpTy>
6087   : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
6088           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6089         (INST ResVPR:$src, OpVPR:$Rn, 
6090           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
6091
6092 multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
6093 {
6094   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
6095                      op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32>;
6096
6097   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
6098                      op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32>;
6099
6100   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
6101                      op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
6102
6103   def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
6104                      op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
6105
6106   // Index can only be half of the max value for lane in 64-bit vector
6107
6108   def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
6109                     op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32>;
6110
6111   def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
6112                     op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
6113 }
6114
6115 defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
6116 defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
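
// Illustrative only: a C example (assuming Clang's arm_neon.h) that should be
// matched by the NI_2VE_lane pattern above, selecting MLAvve_2s4s with the
// 64-bit element vector widened via SUBREG_TO_REG:
//
//   #include <arm_neon.h>
//   int32x2_t mla_by_elt(int32x2_t acc, int32x2_t a, int32x2_t b) {
//     return vmla_lane_s32(acc, a, b, 1);   // mla v0.2s, v1.2s, v2.s[1]
//   }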
6117
6118 class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
6119                  string asmop, string ResS, string OpS, string EleOpS,
6120                  Operand OpImm, RegisterOperand ResVPR,
6121                  RegisterOperand OpVPR, RegisterOperand EleOpVPR>
6122   : NeonI_2VElem<q, u, size, opcode, 
6123                  (outs ResVPR:$Rd), (ins OpVPR:$Rn,
6124                                          EleOpVPR:$Re, OpImm:$Index),
6125                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
6126                  ", $Re." # EleOpS # "[$Index]",
6127                  [],
6128                  NoItinerary> {
6129   bits<3> Index;
6130   bits<5> Re;
6131 }
6132
6133 multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop> {
6134   // vector register class for element is always 128-bit to cover the max index
6135   def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
6136                          neon_uimm2_bare, VPR64, VPR64, VPR128> {
6137     let Inst{11} = {Index{1}};
6138     let Inst{21} = {Index{0}};
6139     let Inst{20-16} = Re;
6140   }
6141
6142   def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
6143                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
6144     let Inst{11} = {Index{1}};
6145     let Inst{21} = {Index{0}};
6146     let Inst{20-16} = Re;
6147   }
6148
6149   // Index operations on 16-bit(H) elements are restricted to using v0-v15.
6150   def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
6151                          neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
6152     let Inst{11} = {Index{2}};
6153     let Inst{21} = {Index{1}};
6154     let Inst{20} = {Index{0}};
6155     let Inst{19-16} = Re{3-0};
6156   }
6157
6158   def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
6159                          neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
6160     let Inst{11} = {Index{2}};
6161     let Inst{21} = {Index{1}};
6162     let Inst{20} = {Index{0}};
6163     let Inst{19-16} = Re{3-0};
6164   }
6165 }
6166
6167 defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
6168 defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
6169 defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
6170
6171 // Pattern for lane in 128-bit vector
6172 class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
6173                        RegisterOperand OpVPR, RegisterOperand EleOpVPR,
6174                        ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
6175   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
6176           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6177         (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
6178
6179 // Pattern for lane in 64-bit vector
6180 class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
6181                       RegisterOperand OpVPR, RegisterOperand EleOpVPR,
6182                       ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
6183   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
6184           (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6185         (INST OpVPR:$Rn, 
6186           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
6187
6188 multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op> {
6189   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
6190                          op, VPR64, VPR128, v2i32, v2i32, v4i32>;
6191
6192   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
6193                          op, VPR128, VPR128, v4i32, v4i32, v4i32>;
6194
6195   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
6196                          op, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
6197
6198   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
6199                          op, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
6200
6201   // Index can only be half of the max value for lane in 64-bit vector
6202
6203   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
6204                         op, VPR64, VPR64, v2i32, v2i32, v2i32>;
6205
6206   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
6207                         op, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
6208 }
6209
6210 defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
6211 defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
6212 defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
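
// Illustrative only: saturating doubling multiply-high by element (assuming
// Clang's arm_neon.h), expected to go through NI_2VE_mul_lane and select the
// SQDMULHve _4h8h instruction with a widened element register:
//
//   #include <arm_neon.h>
//   int16x4_t qdmulh_by_elt(int16x4_t a, int16x4_t v) {
//     return vqdmulh_lane_s16(a, v, 2);   // sqdmulh v0.4h, v0.4h, v1.h[2]
//   }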
6213
6214 // Variant 2
6215
6216 multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop> {
6217   // vector register class for element is always 128-bit to cover the max index
6218   def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
6219                          neon_uimm2_bare, VPR64, VPR64, VPR128> {
6220     let Inst{11} = {Index{1}};
6221     let Inst{21} = {Index{0}};
6222     let Inst{20-16} = Re;
6223   }
6224
6225   def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
6226                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
6227     let Inst{11} = {Index{1}};
6228     let Inst{21} = {Index{0}};
6229     let Inst{20-16} = Re;
6230   }
6231
6232   // _1d2d doesn't exist!
6233
6234   def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
6235                          neon_uimm1_bare, VPR128, VPR128, VPR128> {
6236     let Inst{11} = {Index{0}};
6237     let Inst{21} = 0b0;
6238     let Inst{20-16} = Re;
6239   }
6240 }
6241
6242 defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
6243 defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
6244
6245 class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
6246                          RegisterOperand OpVPR, RegisterOperand EleOpVPR,
6247                          ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
6248                          SDPatternOperator coreop>
6249   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
6250           (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
6251         (INST OpVPR:$Rn, 
6252           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
6253
6254 multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op> {
6255   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
6256                          op, VPR64, VPR128, v2f32, v2f32, v4f32>;
6257
6258   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
6259                          op, VPR128, VPR128, v4f32, v4f32, v4f32>;
6260
6261   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
6262                          op, VPR128, VPR128, v2f64, v2f64, v2f64>;
6263
6264   // Index can only be half of the max value for lane in 64-bit vector
6265
6266   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
6267                         op, VPR64, VPR64, v2f32, v2f32, v2f32>;
6268
6269   def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
6270                            op, VPR128, VPR64, v2f64, v2f64, v1f64,
6271                            BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
6272 }
6273
6274 defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
6275 defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
6276
6277 // The following patterns use fma;
6278 // -ffp-contract=fast generates fma nodes for these patterns to match.
6279
6280 multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop> {
6281   // vector register class for element is always 128-bit to cover the max index
6282   def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
6283                      neon_uimm2_bare, VPR64, VPR64, VPR128> {
6284     let Inst{11} = {Index{1}};
6285     let Inst{21} = {Index{0}};
6286     let Inst{20-16} = Re;
6287   }
6288
6289   def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
6290                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
6291     let Inst{11} = {Index{1}};
6292     let Inst{21} = {Index{0}};
6293     let Inst{20-16} = Re;
6294   }
6295
6296   // _1d2d doesn't exist!
6297   
6298   def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
6299                      neon_uimm1_bare, VPR128, VPR128, VPR128> {
6300     let Inst{11} = {Index{0}};
6301     let Inst{21} = 0b0;
6302     let Inst{20-16} = Re;
6303   }
6304 }
6305
6306 defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
6307 defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
6308
6309 // Pattern for lane in 128-bit vector
6310 class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
6311                        RegisterOperand ResVPR, RegisterOperand OpVPR,
6312                        ValueType ResTy, ValueType OpTy,
6313                        SDPatternOperator coreop>
6314   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
6315                    (ResTy ResVPR:$src), (ResTy ResVPR:$Rn))),
6316         (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
6317
6318 // Pattern for lane in 64-bit vector
6319 class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
6320                       RegisterOperand ResVPR, RegisterOperand OpVPR,
6321                       ValueType ResTy, ValueType OpTy,
6322                       SDPatternOperator coreop>
6323   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
6324                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
6325         (INST ResVPR:$src, ResVPR:$Rn, 
6326           (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
6327
6328 // Pattern for lane in 64-bit vector
6329 class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
6330                            SDPatternOperator op,
6331                            RegisterOperand ResVPR, RegisterOperand OpVPR,
6332                            ValueType ResTy, ValueType OpTy,
6333                            SDPatternOperator coreop>
6334   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
6335                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
6336         (INST ResVPR:$src, ResVPR:$Rn, 
6337           (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
6338
6339
6340 multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op> {
6341   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
6342                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
6343                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6344
6345   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
6346                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
6347                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6348
6349   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
6350                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
6351                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6352
6353   // Index can only be half of the max value for lane in 64-bit vector
6354
6355   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
6356                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
6357                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6358
6359   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
6360                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
6361                              BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
6362 }
6363
6364 defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
6365
6366 multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
6367 {
6368   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
6369                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
6370                          BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
6371
6372   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
6373                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
6374                          BinOpFrag<(Neon_vduplane
6375                                      (fneg node:$LHS), node:$RHS)>>;
6376
6377   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
6378                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
6379                          BinOpFrag<(fneg (Neon_vduplane
6380                                      node:$LHS, node:$RHS))>>;
6381
6382   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
6383                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
6384                          BinOpFrag<(Neon_vduplane
6385                                      (fneg node:$LHS), node:$RHS)>>;
6386
6387   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
6388                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
6389                          BinOpFrag<(fneg (Neon_vduplane
6390                                      node:$LHS, node:$RHS))>>;
6391
6392   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
6393                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
6394                          BinOpFrag<(Neon_vduplane
6395                                      (fneg node:$LHS), node:$RHS)>>;
6396
6397   // Index can only be half of the max value for lane in 64-bit vector
6398
6399   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
6400                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
6401                         BinOpFrag<(fneg (Neon_vduplane
6402                                     node:$LHS, node:$RHS))>>;
6403
6404   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
6405                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
6406                         BinOpFrag<(Neon_vduplane
6407                                     (fneg node:$LHS), node:$RHS)>>;
6408
6409   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
6410                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
6411                         BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
6412
6413   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
6414                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
6415                         BinOpFrag<(Neon_vduplane (fneg node:$LHS), node:$RHS)>>;
6416
6417   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
6418                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
6419                              BinOpFrag<(fneg (Neon_combine_2d
6420                                          node:$LHS, node:$RHS))>>;
6421
6422   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
6423                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
6424                              BinOpFrag<(Neon_combine_2d
6425                                          (fneg node:$LHS), (fneg node:$RHS))>>;
6426 }
6427
6428 defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
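
// Illustrative only: with -ffp-contract=fast (as noted above), the separate
// multiply and add below are expected to be contracted into an fma node and
// matched by the NI_2VEswap patterns, giving fmla by element:
//
//   #include <arm_neon.h>
//   // Assumed to be built with: clang -O2 -ffp-contract=fast
//   float32x2_t fmla_by_elt(float32x2_t acc, float32x2_t a, float32x2_t b) {
//     return vadd_f32(acc, vmul_f32(a, vdup_lane_f32(b, 1)));
//     // fmla v0.2s, v1.2s, v2.s[1]
//   }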
6429
6430 // Variant 3: Long type
6431 // E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
6432 //      SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
6433
6434 multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop> {
6435   // vector register class for element is always 128-bit to cover the max index
6436   def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
6437                      neon_uimm2_bare, VPR128, VPR64, VPR128> {
6438     let Inst{11} = {Index{1}};
6439     let Inst{21} = {Index{0}};
6440     let Inst{20-16} = Re;
6441   }
6442   
6443   def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
6444                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
6445     let Inst{11} = {Index{1}};
6446     let Inst{21} = {Index{0}};
6447     let Inst{20-16} = Re;
6448   }
6449
6450   // Index operations on 16-bit(H) elements are restricted to using v0-v15.
6451   def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
6452                      neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
6453     let Inst{11} = {Index{2}};
6454     let Inst{21} = {Index{1}};
6455     let Inst{20} = {Index{0}};
6456     let Inst{19-16} = Re{3-0};
6457   }
6458   
6459   def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
6460                      neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
6461     let Inst{11} = {Index{2}};
6462     let Inst{21} = {Index{1}};
6463     let Inst{20} = {Index{0}};
6464     let Inst{19-16} = Re{3-0};
6465   }
6466 }
6467
6468 defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
6469 defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
6470 defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
6471 defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
6472 defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
6473 defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
6474
6475 multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop> {
6476   // vector register class for element is always 128-bit to cover the max index
6477   def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
6478                          neon_uimm2_bare, VPR128, VPR64, VPR128> {
6479     let Inst{11} = {Index{1}};
6480     let Inst{21} = {Index{0}};
6481     let Inst{20-16} = Re;
6482   }
6483   
6484   def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
6485                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
6486     let Inst{11} = {Index{1}};
6487     let Inst{21} = {Index{0}};
6488     let Inst{20-16} = Re;
6489   }
6490
6491   // Index operations on 16-bit(H) elements are restricted to using v0-v15.
6492   def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
6493                          neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
6494     let Inst{11} = {Index{2}};
6495     let Inst{21} = {Index{1}};
6496     let Inst{20} = {Index{0}};
6497     let Inst{19-16} = Re{3-0};
6498   }
6499   
6500   def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
6501                          neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
6502     let Inst{11} = {Index{2}};
6503     let Inst{21} = {Index{1}};
6504     let Inst{20} = {Index{0}};
6505     let Inst{19-16} = Re{3-0};
6506   }
6507 }
6508
6509 defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
6510 defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
6511 defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
6512
6513 // Pattern for lane in 128-bit vector
6514 class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
6515                      RegisterOperand EleOpVPR, ValueType ResTy,
6516                      ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
6517                      SDPatternOperator hiop>
6518   : Pat<(ResTy (op (ResTy VPR128:$src),
6519           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
6520           (HalfOpTy (Neon_vduplane
6521                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6522         (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
6523
6524 // Pattern for lane in 64-bit vector
6525 class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
6526                     RegisterOperand EleOpVPR, ValueType ResTy,
6527                     ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
6528                     SDPatternOperator hiop>
6529   : Pat<(ResTy (op (ResTy VPR128:$src),
6530           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
6531           (HalfOpTy (Neon_vduplane
6532                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6533         (INST VPR128:$src, VPR128:$Rn, 
6534           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
6535
6536 multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op> {
6537   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
6538                      op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
6539   
6540   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
6541                      op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32>;
6542   
6543   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
6544                        op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
6545   
6546   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
6547                        op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
6548   
6549   // Index can only be half of the max value for lane in 64-bit vector
6550
6551   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
6552                     op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
6553   
6554   def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
6555                     op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32>;
6556
6557   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
6558                       op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
6559   
6560   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
6561                       op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
6562 }
6563
6564 defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
6565 defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
6566 defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
6567 defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
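
// Illustrative only: a widening multiply-accumulate by element (assuming
// Clang's arm_neon.h), expected to match SMLALvve_4s4h via NI_2VE_lane; the
// asmop # "2" forms above cover the high halves through Neon_High8H/High4S:
//
//   #include <arm_neon.h>
//   int32x4_t smlal_by_elt(int32x4_t acc, int16x4_t a, int16x4_t v) {
//     return vmlal_lane_s16(acc, a, v, 3);   // smlal v0.4s, v1.4h, v2.h[3]
//   }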
6568
6569 // Pattern for lane in 128-bit vector
6570 class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
6571                          RegisterOperand EleOpVPR, ValueType ResTy,
6572                          ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
6573                          SDPatternOperator hiop>
6574   : Pat<(ResTy (op 
6575           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
6576           (HalfOpTy (Neon_vduplane
6577                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6578         (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
6579
6580 // Pattern for lane in 64-bit vector
6581 class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
6582                         RegisterOperand EleOpVPR, ValueType ResTy,
6583                         ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
6584                         SDPatternOperator hiop>
6585   : Pat<(ResTy (op
6586           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
6587           (HalfOpTy (Neon_vduplane
6588                       (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6589         (INST VPR128:$Rn, 
6590           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
6591
6592 multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op> {
6593   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
6594                          op, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
6595
6596   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
6597                          op, VPR64, VPR128, v2i64, v2i32, v4i32>;
6598
6599   def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
6600                          op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
6601   
6602   def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
6603                            op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
6604   
6605   // Index can only be half of the max value for lane in 64-bit vector
6606
6607   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
6608                         op, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
6609
6610   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
6611                         op, VPR64, VPR64, v2i64, v2i32, v2i32>;
6612
6613   def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
6614                           op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
6615   
6616   def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
6617                           op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
6618 }
6619
6620 defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
6621 defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
6622 defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
6623
6624 multiclass NI_qdma<SDPatternOperator op> {
6625   def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
6626                     (op node:$Ra,
6627                       (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
6628
6629   def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
6630                     (op node:$Ra,
6631                       (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
6632 }
6633
6634 defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
6635 defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
6636
6637 multiclass NI_2VEL_v3_qdma_pat<string subop, string op> {
6638   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
6639                      !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
6640                      v4i32, v4i16, v8i16>;
6641   
6642   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
6643                      !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
6644                      v2i64, v2i32, v4i32>;
6645   
6646   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
6647                        !cast<PatFrag>(op # "_4s"), VPR128Lo,
6648                        v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
6649   
6650   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
6651                        !cast<PatFrag>(op # "_2d"), VPR128,
6652                        v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
6653   
6654   // Index can only be half of the max value for lane in 64-bit vector
6655
6656   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
6657                     !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
6658                     v4i32, v4i16, v4i16>;
6659   
6660   def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
6661                     !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
6662                     v2i64, v2i32, v2i32>;
6663
6664   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
6665                       !cast<PatFrag>(op # "_4s"), VPR64Lo,
6666                       v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
6667   
6668   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
6669                       !cast<PatFrag>(op # "_2d"), VPR64,
6670                       v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
6671 }
6672
6673 defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
6674 defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
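
// Illustrative only: saturating doubling widening multiply-accumulate by
// element (assuming Clang's arm_neon.h), expected to match the Neon_qdmlal_4s
// PatFrag above and select SQDMLALvve_4s4h:
//
//   #include <arm_neon.h>
//   int32x4_t qdmlal_by_elt(int32x4_t acc, int16x4_t a, int16x4_t v) {
//     return vqdmlal_lane_s16(acc, a, v, 0);   // sqdmlal v0.4s, v1.4h, v2.h[0]
//   }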
6675
6676 // End of implementation for instruction class (3V Elem)
6677
6678 class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
6679                      RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
6680   : NeonI_copy<0b1, 0b0, 0b0011,
6681                (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
6682                asmop # "\t$Rd." # Res # "[$Imm], $Rn",
6683                [(set (ResTy VPR128:$Rd),
6684                  (ResTy (vector_insert
6685                    (ResTy VPR128:$src),
6686                    (OpTy OpGPR:$Rn),
6687                    (OpImm:$Imm))))],
6688                NoItinerary> {
6689   bits<4> Imm;
6690   let Constraints = "$src = $Rd";
6691 }
6692
6693 // Insert element (vector, from main)
6694 def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
6695                            neon_uimm4_bare> {
6696   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6697 }
6698 def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
6699                            neon_uimm3_bare> {
6700   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6701 }
6702 def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
6703                            neon_uimm2_bare> {
6704   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6705 }
6706 def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
6707                            neon_uimm1_bare> {
6708   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
6709 }
6710
6711 def : NeonInstAlias<"mov $Rd.b[$Imm], $Rn",
6712                     (INSbw VPR128:$Rd, GPR32:$Rn, neon_uimm4_bare:$Imm), 0>;
6713 def : NeonInstAlias<"mov $Rd.h[$Imm], $Rn",
6714                     (INShw VPR128:$Rd, GPR32:$Rn, neon_uimm3_bare:$Imm), 0>;
6715 def : NeonInstAlias<"mov $Rd.s[$Imm], $Rn",
6716                     (INSsw VPR128:$Rd, GPR32:$Rn, neon_uimm2_bare:$Imm), 0>;
6717 def : NeonInstAlias<"mov $Rd.d[$Imm], $Rn",
6718                     (INSdx VPR128:$Rd, GPR64:$Rn, neon_uimm1_bare:$Imm), 0>;
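
// Illustrative only: inserting a general-purpose register into a vector lane
// (assuming Clang's arm_neon.h) is a vector_insert node and is expected to
// select INSsw above (also printed via the "mov v0.s[2], w0" alias):
//
//   #include <arm_neon.h>
//   int32x4_t set_lane(int32x4_t v, int x) {
//     return vsetq_lane_s32(x, v, 2);   // ins v0.s[2], w0
//   }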
6719
6720 class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
6721                              RegisterClass OpGPR, ValueType OpTy, 
6722                              Operand OpImm, Instruction INS> 
6723   : Pat<(ResTy (vector_insert
6724               (ResTy VPR64:$src),
6725               (OpTy OpGPR:$Rn),
6726               (OpImm:$Imm))),
6727         (ResTy (EXTRACT_SUBREG 
6728           (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
6729             OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
6730
6731 def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
6732                                           neon_uimm3_bare, INSbw>;
6733 def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
6734                                           neon_uimm2_bare, INShw>;
6735 def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
6736                                           neon_uimm1_bare, INSsw>;
6737 def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
6738                                           neon_uimm0_bare, INSdx>;
6739
6740 class NeonI_INS_element<string asmop, string Res, Operand ResImm>
6741   : NeonI_insert<0b1, 0b1,
6742                  (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, 
6743                  ResImm:$Immd, ResImm:$Immn),
6744                  asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
6745                  [],
6746                  NoItinerary> {
6747   let Constraints = "$src = $Rd";
6748   bits<4> Immd;
6749   bits<4> Immn;
6750 }
6751
6752 // Insert element (vector, from element)
6753 def INSELb : NeonI_INS_element<"ins", "b", neon_uimm4_bare> {
6754   let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
6755   let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
6756 }
6757 def INSELh : NeonI_INS_element<"ins", "h", neon_uimm3_bare> {
6758   let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
6759   let Inst{14-11} = {Immn{2}, Immn{1}, Immn{0}, 0b0};
6760   // bit 11 is unspecified, but should be set to zero.
6761 }
6762 def INSELs : NeonI_INS_element<"ins", "s", neon_uimm2_bare> {
6763   let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
6764   let Inst{14-11} = {Immn{1}, Immn{0}, 0b0, 0b0};
6765   // bits 11-12 are unspecified, but should be set to zero.
6766 }
6767 def INSELd : NeonI_INS_element<"ins", "d", neon_uimm1_bare> {
6768   let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
6769   let Inst{14-11} = {Immn{0}, 0b0, 0b0, 0b0};
6770   // bits 11-13 are unspecified, but should be set to zero.
6771 }
6772
6773 def : NeonInstAlias<"mov $Rd.b[$Immd], $Rn.b[$Immn]",
6774                     (INSELb VPR128:$Rd, VPR128:$Rn,
6775                       neon_uimm4_bare:$Immd, neon_uimm4_bare:$Immn), 0>;
6776 def : NeonInstAlias<"mov $Rd.h[$Immd], $Rn.h[$Immn]",
6777                     (INSELh VPR128:$Rd, VPR128:$Rn,
6778                       neon_uimm3_bare:$Immd, neon_uimm3_bare:$Immn), 0>;
6779 def : NeonInstAlias<"mov $Rd.s[$Immd], $Rn.s[$Immn]",
6780                     (INSELs VPR128:$Rd, VPR128:$Rn,
6781                       neon_uimm2_bare:$Immd, neon_uimm2_bare:$Immn), 0>;
6782 def : NeonInstAlias<"mov $Rd.d[$Immd], $Rn.d[$Immn]",
6783                     (INSELd VPR128:$Rd, VPR128:$Rn,
6784                       neon_uimm1_bare:$Immd, neon_uimm1_bare:$Immn), 0>;
6785
6786 multiclass Neon_INS_elt_pattern<ValueType ResTy, ValueType NaTy,
6787                                 ValueType MidTy, Operand StImm, Operand NaImm,
6788                                 Instruction INS> {
6789 def : Pat<(ResTy (vector_insert
6790             (ResTy VPR128:$src),
6791             (MidTy (vector_extract
6792               (ResTy VPR128:$Rn),
6793               (StImm:$Immn))),
6794             (StImm:$Immd))),
6795           (INS (ResTy VPR128:$src), (ResTy VPR128:$Rn),
6796               StImm:$Immd, StImm:$Immn)>;
6797
6798 def : Pat <(ResTy (vector_insert
6799              (ResTy VPR128:$src),
6800              (MidTy (vector_extract
6801                (NaTy VPR64:$Rn),
6802                (NaImm:$Immn))),
6803              (StImm:$Immd))),
6804            (INS (ResTy VPR128:$src),
6805              (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
6806              StImm:$Immd, NaImm:$Immn)>;
6807
6808 def : Pat <(NaTy (vector_insert
6809              (NaTy VPR64:$src),
6810              (MidTy (vector_extract
6811                (ResTy VPR128:$Rn),
6812                (StImm:$Immn))),
6813              (NaImm:$Immd))),
6814            (NaTy (EXTRACT_SUBREG
6815              (ResTy (INS
6816                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6817                (ResTy VPR128:$Rn),
6818                NaImm:$Immd, StImm:$Immn)),
6819              sub_64))>;
6820
6821 def : Pat <(NaTy (vector_insert
6822              (NaTy VPR64:$src),
6823              (MidTy (vector_extract
6824                (NaTy VPR64:$Rn),
6825                (NaImm:$Immn))),
6826              (NaImm:$Immd))),
6827            (NaTy (EXTRACT_SUBREG
6828              (ResTy (INS
6829                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6830                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
6831                NaImm:$Immd, NaImm:$Immn)),
6832              sub_64))>;
6833 }
6834
6835 defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, neon_uimm2_bare,
6836                             neon_uimm1_bare, INSELs>;
6837 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, neon_uimm1_bare,
6838                             neon_uimm0_bare, INSELd>;
6839 defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
6840                             neon_uimm3_bare, INSELb>;
6841 defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
6842                             neon_uimm2_bare, INSELh>;
6843 defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
6844                             neon_uimm1_bare, INSELs>;
6845 defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, neon_uimm1_bare,
6846                             neon_uimm0_bare, INSELd>;
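
// Illustrative only: an extract fed straight into an insert (assuming Clang's
// arm_neon.h) matches the first Neon_INS_elt_pattern above and is expected to
// become a single element-to-element INS:
//
//   #include <arm_neon.h>
//   int32x4_t copy_lane(int32x4_t d, int32x4_t n) {
//     return vsetq_lane_s32(vgetq_lane_s32(n, 3), d, 1);  // ins v0.s[1], v1.s[3]
//   }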
6847
6848 multiclass Neon_INS_elt_float_pattern<ValueType ResTy, ValueType NaTy,
6849                                       ValueType MidTy,
6850                                       RegisterClass OpFPR, Operand ResImm,
6851                                       SubRegIndex SubIndex, Instruction INS> {
6852 def : Pat <(ResTy (vector_insert
6853              (ResTy VPR128:$src),
6854              (MidTy OpFPR:$Rn),
6855              (ResImm:$Imm))),
6856            (INS (ResTy VPR128:$src),
6857              (ResTy (SUBREG_TO_REG (i64 0), OpFPR:$Rn, SubIndex)),
6858              ResImm:$Imm,
6859              (i64 0))>;
6860
6861 def : Pat <(NaTy (vector_insert
6862              (NaTy VPR64:$src),
6863              (MidTy OpFPR:$Rn),
6864              (ResImm:$Imm))),
6865            (NaTy (EXTRACT_SUBREG 
6866              (ResTy (INS 
6867                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6868                (ResTy (SUBREG_TO_REG (i64 0), (MidTy OpFPR:$Rn), SubIndex)),
6869                ResImm:$Imm,
6870                (i64 0))),
6871              sub_64))>;
6872 }
6873
6874 defm : Neon_INS_elt_float_pattern<v4f32, v2f32, f32, FPR32, neon_uimm2_bare,
6875                                   sub_32, INSELs>;
6876 defm : Neon_INS_elt_float_pattern<v2f64, v1f64, f64, FPR64, neon_uimm1_bare,
6877                                   sub_64, INSELd>;
6878
6879 class NeonI_SMOV<string asmop, string Res, bit Q,
6880                  ValueType OpTy, ValueType eleTy,
6881                  Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
6882   : NeonI_copy<Q, 0b0, 0b0101,
6883                (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
6884                asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
6885                [(set (ResTy ResGPR:$Rd),
6886                  (ResTy (sext_inreg
6887                    (ResTy (vector_extract
6888                      (OpTy VPR128:$Rn), (OpImm:$Imm))),
6889                    eleTy)))],
6890                NoItinerary> {
6891   bits<4> Imm;
6892 }
6893
6894 // Signed integer move (main, from element)
6895 def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
6896                         GPR32, i32> {
6897   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6898 }
6899 def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
6900                         GPR32, i32> {
6901   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6902 }
6903 def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
6904                         GPR64, i64> {
6905   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6906 }
6907 def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
6908                         GPR64, i64> {
6909   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6910 }
6911 def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
6912                         GPR64, i64> {
6913   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6914 }
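
// Illustrative only: extracting a narrow lane into a wider signed integer
// (assuming Clang's arm_neon.h) produces vector_extract plus sign extension,
// which the SMOV patterns here are expected to fold into a single smov:
//
//   #include <arm_neon.h>
//   int get_sext(int16x8_t v) {
//     return vgetq_lane_s16(v, 5);   // smov w0, v0.h[5]
//   }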
6915
6916 multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
6917                                ValueType eleTy, Operand StImm,  Operand NaImm,
6918                                Instruction SMOVI> {
6919   def : Pat<(i64 (sext_inreg
6920               (i64 (anyext
6921                 (i32 (vector_extract
6922                   (StTy VPR128:$Rn), (StImm:$Imm))))),
6923               eleTy)),
6924             (SMOVI VPR128:$Rn, StImm:$Imm)>;
6925   
6926   def : Pat<(i64 (sext
6927               (i32 (vector_extract
6928                 (StTy VPR128:$Rn), (StImm:$Imm))))),
6929             (SMOVI VPR128:$Rn, StImm:$Imm)>;
6930   
6931   def : Pat<(i64 (sext_inreg
6932               (i64 (vector_extract
6933                 (NaTy VPR64:$Rn), (NaImm:$Imm))),
6934               eleTy)),
6935             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6936               NaImm:$Imm)>;
6937   
6938   def : Pat<(i64 (sext_inreg
6939               (i64 (anyext
6940                 (i32 (vector_extract
6941                   (NaTy VPR64:$Rn), (NaImm:$Imm))))),
6942               eleTy)),
6943             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6944               NaImm:$Imm)>;
6945   
6946   def : Pat<(i64 (sext
6947               (i32 (vector_extract
6948                 (NaTy VPR64:$Rn), (NaImm:$Imm))))),
6949             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6950               NaImm:$Imm)>; 
6951 }
6952
6953 defm : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
6954                           neon_uimm3_bare, SMOVxb>;
6955 defm : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
6956                           neon_uimm2_bare, SMOVxh>;
6957 defm : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
6958                           neon_uimm1_bare, SMOVxs>;
6959
6960 class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
6961                           ValueType eleTy, Operand StImm,  Operand NaImm,
6962                           Instruction SMOVI>
6963   : Pat<(i32 (sext_inreg
6964           (i32 (vector_extract
6965             (NaTy VPR64:$Rn), (NaImm:$Imm))),
6966           eleTy)),
6967         (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6968           NaImm:$Imm)>;
6969
6970 def : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
6971                          neon_uimm3_bare, SMOVwb>;
6972 def : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
6973                          neon_uimm2_bare, SMOVwh>;
6974
6975 class NeonI_UMOV<string asmop, string Res, bit Q,
6976                  ValueType OpTy, Operand OpImm,
6977                  RegisterClass ResGPR, ValueType ResTy>
6978   : NeonI_copy<Q, 0b0, 0b0111,
6979                (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
6980                asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
6981                [(set (ResTy ResGPR:$Rd),
6982                   (ResTy (vector_extract
6983                     (OpTy VPR128:$Rn), (OpImm:$Imm))))],
6984                NoItinerary> {
6985   bits<4> Imm;
6986 }
6987
6988 // Unsigned integer move (main, from element)
6989 def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
6990                          GPR32, i32> {
6991   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6992 }
6993 def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
6994                          GPR32, i32> {
6995   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6996 }
6997 def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
6998                          GPR32, i32> {
6999   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7000 }
7001 def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
7002                          GPR64, i64> {
7003   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
7004 }
7005
7006 def : NeonInstAlias<"mov $Rd, $Rn.s[$Imm]",
7007                     (UMOVws GPR32:$Rd, VPR128:$Rn, neon_uimm2_bare:$Imm), 0>;
7008 def : NeonInstAlias<"mov $Rd, $Rn.d[$Imm]",
7009                     (UMOVxd GPR64:$Rd, VPR128:$Rn, neon_uimm1_bare:$Imm), 0>;
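
// Illustrative only: extracting an unsigned narrow lane (assuming Clang's
// arm_neon.h) zero-extends the element, which the patterns below are expected
// to fold into a single umov (printed as mov for the s/d forms):
//
//   #include <arm_neon.h>
//   unsigned get_zext(uint16x8_t v) {
//     return vgetq_lane_u16(v, 2);   // umov w0, v0.h[2]
//   }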
7010
7011 class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
7012                          Operand StImm,  Operand NaImm,
7013                          Instruction SMOVI>
7014   : Pat<(ResTy (vector_extract
7015           (NaTy VPR64:$Rn), NaImm:$Imm)),
7016         (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7017           NaImm:$Imm)>;
7018
7019 def : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
7020                         neon_uimm3_bare, UMOVwb>;
7021 def : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
7022                         neon_uimm2_bare, UMOVwh>; 
7023 def : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
7024                         neon_uimm1_bare, UMOVws>;
7025
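// An extract that is explicitly masked back to the element width (and with
// 255/65535) or zero-extended is already what UMOV produces, so select UMOV
// directly.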
7026 def : Pat<(i32 (and
7027             (i32 (vector_extract
7028               (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
7029             255)),
7030           (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
7031
7032 def : Pat<(i32 (and
7033             (i32 (vector_extract
7034               (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
7035             65535)),
7036           (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
7037
7038 def : Pat<(i64 (zext
7039             (i32 (vector_extract
7040               (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
7041           (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
7042
7043 def : Pat<(i32 (and
7044             (i32 (vector_extract
7045               (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
7046             255)),
7047           (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7048             neon_uimm3_bare:$Imm)>;
7049
7050 def : Pat<(i32 (and
7051             (i32 (vector_extract
7052               (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
7053             65535)),
7054           (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7055             neon_uimm2_bare:$Imm)>;
7056
7057 def : Pat<(i64 (zext
7058             (i32 (vector_extract
7059               (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
7060           (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
7061             neon_uimm0_bare:$Imm)>;
7062
7063 // Additional copy patterns for scalar types
7064 def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
7065           (UMOVwb (v16i8
7066             (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
7067
7068 def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
7069           (UMOVwh (v8i16
7070             (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
7071
7072 def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
7073           (FMOVws FPR32:$Rn)>;
7074
7075 def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
7076           (FMOVxd FPR64:$Rn)>;
7077                
7078 def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
7079           (f64 FPR64:$Rn)>;
7080
7081 def : Pat<(f32 (vector_extract (v1f32 FPR32:$Rn), (i64 0))),
7082           (f32 FPR32:$Rn)>;
7083
7084 def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
7085           (v1i8 (EXTRACT_SUBREG (v16i8
7086             (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
7087             sub_8))>;
7088
7089 def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
7090           (v1i16 (EXTRACT_SUBREG (v8i16
7091             (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
7092             sub_16))>;
7093
7094 def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
7095           (FMOVsw $src)>;
7096
7097 def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),
7098           (FMOVdx $src)>;
7099
7100 def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$Rn))),
7101           (v1f32 FPR32:$Rn)>;
7102 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
7103           (v1f64 FPR64:$Rn)>;
7104
7105 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
7106           (FMOVdd $src)>;
7107
7108 def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$src))),
7109           (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
7110                          (f64 FPR64:$src), sub_64)>;
7111
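// DUP (element): broadcast a single vector lane across all lanes of the
// destination.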
7112 class NeonI_DUP_Elt<bit Q, string asmop, string rdlane,  string rnlane,
7113                     RegisterOperand ResVPR, Operand OpImm>
7114   : NeonI_copy<Q, 0b0, 0b0000, (outs ResVPR:$Rd),
7115                (ins VPR128:$Rn, OpImm:$Imm),
7116                asmop # "\t$Rd" # rdlane # ", $Rn" # rnlane # "[$Imm]",
7117                [],
7118                NoItinerary> {
7119   bits<4> Imm;
7120 }
7121
7122 def DUPELT16b : NeonI_DUP_Elt<0b1, "dup", ".16b", ".b", VPR128,
7123                               neon_uimm4_bare> {
7124   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7125 }
7126
7127 def DUPELT8h : NeonI_DUP_Elt<0b1, "dup", ".8h", ".h", VPR128,
7128                               neon_uimm3_bare> {
7129   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7130 }
7131
7132 def DUPELT4s : NeonI_DUP_Elt<0b1, "dup", ".4s", ".s", VPR128,
7133                               neon_uimm2_bare> {
7134   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7135 }
7136
7137 def DUPELT2d : NeonI_DUP_Elt<0b1, "dup", ".2d", ".d", VPR128,
7138                               neon_uimm1_bare> {
7139   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
7140 }
7141
7142 def DUPELT8b : NeonI_DUP_Elt<0b0, "dup", ".8b", ".b", VPR64,
7143                               neon_uimm4_bare> {
7144   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
7145 }
7146
7147 def DUPELT4h : NeonI_DUP_Elt<0b0, "dup", ".4h", ".h", VPR64,
7148                               neon_uimm3_bare> {
7149   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
7150 }
7151
7152 def DUPELT2s : NeonI_DUP_Elt<0b0, "dup", ".2s", ".s", VPR64,
7153                               neon_uimm2_bare> {
7154   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
7155 }
7156
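// Neon_vduplane selection: 128-bit sources use DUP (element) directly, while
// 64-bit sources are first widened with SUBREG_TO_REG.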
7157 multiclass NeonI_DUP_Elt_pattern<Instruction DUPELT, ValueType ResTy,
7158                                        ValueType OpTy, ValueType NaTy,
7159                                        ValueType ExTy, Operand OpLImm,
7160                                        Operand OpNImm> {
7161 def  : Pat<(ResTy (Neon_vduplane (OpTy VPR128:$Rn), OpLImm:$Imm)),
7162         (ResTy (DUPELT (OpTy VPR128:$Rn), OpLImm:$Imm))>;
7163
7164 def : Pat<(ResTy (Neon_vduplane
7165             (NaTy VPR64:$Rn), OpNImm:$Imm)),
7166           (ResTy (DUPELT
7167             (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)), OpNImm:$Imm))>;
7168 }
7169 defm : NeonI_DUP_Elt_pattern<DUPELT16b, v16i8, v16i8, v8i8, v16i8,
7170                              neon_uimm4_bare, neon_uimm3_bare>;
7171 defm : NeonI_DUP_Elt_pattern<DUPELT8b, v8i8, v16i8, v8i8, v16i8,
7172                              neon_uimm4_bare, neon_uimm3_bare>;
7173 defm : NeonI_DUP_Elt_pattern<DUPELT8h, v8i16, v8i16, v4i16, v8i16,
7174                              neon_uimm3_bare, neon_uimm2_bare>;
7175 defm : NeonI_DUP_Elt_pattern<DUPELT4h, v4i16, v8i16, v4i16, v8i16,
7176                              neon_uimm3_bare, neon_uimm2_bare>;
7177 defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4i32, v4i32, v2i32, v4i32,
7178                              neon_uimm2_bare, neon_uimm1_bare>;
7179 defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2i32, v4i32, v2i32, v4i32,
7180                              neon_uimm2_bare, neon_uimm1_bare>;
7181 defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2i64, v2i64, v1i64, v2i64,
7182                              neon_uimm1_bare, neon_uimm0_bare>;
7183 defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4f32, v4f32, v2f32, v4f32,
7184                              neon_uimm2_bare, neon_uimm1_bare>;
7185 defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2f32, v4f32, v2f32, v4f32,
7186                              neon_uimm2_bare, neon_uimm1_bare>;
7187 defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2f64, v2f64, v1f64, v2f64,
7188                              neon_uimm1_bare, neon_uimm0_bare>;
7189
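// A floating-point scalar already sits in the low lanes of a vector register,
// so duplicating it is DUP (element) from lane 0 after SUBREG_TO_REG.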
7190 def : Pat<(v2f32 (Neon_vdup (f32 FPR32:$Rn))),
7191           (v2f32 (DUPELT2s 
7192             (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
7193             (i64 0)))>;
7194 def : Pat<(v4f32 (Neon_vdup (f32 FPR32:$Rn))),
7195           (v4f32 (DUPELT4s 
7196             (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
7197             (i64 0)))>;
7198 def : Pat<(v2f64 (Neon_vdup (f64 FPR64:$Rn))),
7199           (v2f64 (DUPELT2d 
7200             (SUBREG_TO_REG (i64 0), FPR64:$Rn, sub_64),
7201             (i64 0)))>;
7202
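// DUP (general): broadcast a general-purpose register across all vector
// lanes.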
7203 class NeonI_DUP<bit Q, string asmop, string rdlane,
7204                 RegisterOperand ResVPR, ValueType ResTy,
7205                 RegisterClass OpGPR, ValueType OpTy>
7206   : NeonI_copy<Q, 0b0, 0b0001, (outs ResVPR:$Rd), (ins OpGPR:$Rn),
7207                asmop # "\t$Rd" # rdlane # ", $Rn",
7208                [(set (ResTy ResVPR:$Rd), 
7209                  (ResTy (Neon_vdup (OpTy OpGPR:$Rn))))],
7210                NoItinerary>;
7211
7212 def DUP16b : NeonI_DUP<0b1, "dup", ".16b", VPR128, v16i8, GPR32, i32> {
7213   let Inst{20-16} = 0b00001;
7214   // bits 17-20 are unspecified, but should be set to zero.
7215 }
7216
7217 def DUP8h : NeonI_DUP<0b1, "dup", ".8h", VPR128, v8i16, GPR32, i32> {
7218   let Inst{20-16} = 0b00010;
7219   // bits 18-20 are unspecified, but should be set to zero.
7220 }
7221
7222 def DUP4s : NeonI_DUP<0b1, "dup", ".4s", VPR128, v4i32, GPR32, i32> {
7223   let Inst{20-16} = 0b00100;
7224   // bits 19-20 are unspecified, but should be set to zero.
7225 }
7226
7227 def DUP2d : NeonI_DUP<0b1, "dup", ".2d", VPR128, v2i64, GPR64, i64> {
7228   let Inst{20-16} = 0b01000;
7229   // bit 20 is unspecified, but should be set to zero.
7230 }
7231
7232 def DUP8b : NeonI_DUP<0b0, "dup", ".8b", VPR64, v8i8, GPR32, i32> {
7233   let Inst{20-16} = 0b00001;
7234   // bits 17-20 are unspecified, but should be set to zero.
7235 }
7236
7237 def DUP4h : NeonI_DUP<0b0, "dup", ".4h", VPR64, v4i16, GPR32, i32> {
7238   let Inst{20-16} = 0b00010;
7239   // bits 18-20 are unspecified, but should be set to zero.
7240 }
7241
7242 def DUP2s : NeonI_DUP<0b0, "dup", ".2s", VPR64, v2i32, GPR32, i32> {
7243   let Inst{20-16} = 0b00100;
7244   // bits 19-20 are unspecified, but should be set to zero.
7245 }
7246
7247 // patterns for CONCAT_VECTORS
7248 multiclass Concat_Vector_Pattern<ValueType ResTy, ValueType OpTy> {
7249 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), undef)),
7250           (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)>;
7251 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))),
7252           (INSELd 
7253             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7254             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rm, sub_64)),
7255             (i64 1),
7256             (i64 0))>;
7257 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rn))),
7258           (DUPELT2d 
7259             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
7260             (i64 0))> ;
7261 }
7262
7263 defm : Concat_Vector_Pattern<v16i8, v8i8>;
7264 defm : Concat_Vector_Pattern<v8i16, v4i16>;
7265 defm : Concat_Vector_Pattern<v4i32, v2i32>;
7266 defm : Concat_Vector_Pattern<v2i64, v1i64>;
7267 defm : Concat_Vector_Pattern<v4f32, v2f32>;
7268 defm : Concat_Vector_Pattern<v2f64, v1f64>;
7269
7270 // patterns for EXTRACT_SUBVECTOR
7271 def : Pat<(v8i8 (extract_subvector (v16i8 VPR128:$Rn), (i64 0))),
7272           (v8i8 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7273 def : Pat<(v4i16 (extract_subvector (v8i16 VPR128:$Rn), (i64 0))),
7274           (v4i16 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7275 def : Pat<(v2i32 (extract_subvector (v4i32 VPR128:$Rn), (i64 0))),
7276           (v2i32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7277 def : Pat<(v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 0))),
7278           (v1i64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7279 def : Pat<(v2f32 (extract_subvector (v4f32 VPR128:$Rn), (i64 0))),
7280           (v2f32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7281 def : Pat<(v1f64 (extract_subvector (v2f64 VPR128:$Rn), (i64 0))),
7282           (v1f64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
7283
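// REV64/REV32/REV16: reverse the order of the elements within each 64-bit,
// 32-bit or 16-bit container.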
7284 class NeonI_REV<string asmop, string Res, bits<2> size, bit Q, bit U,
7285                 bits<5> opcode, RegisterOperand ResVPR, ValueType ResTy,
7286                 SDPatternOperator Neon_Rev>
7287   : NeonI_2VMisc<Q, U, size, opcode,
7288                (outs ResVPR:$Rd), (ins ResVPR:$Rn),
7289                asmop # "\t$Rd." # Res # ", $Rn." # Res,
7290                [(set (ResTy ResVPR:$Rd),
7291                   (ResTy (Neon_Rev (ResTy ResVPR:$Rn))))],
7292                NoItinerary> ;
7293
7294 def REV64_16b : NeonI_REV<"rev64", "16b", 0b00, 0b1, 0b0, 0b00000, VPR128,
7295                           v16i8, Neon_rev64>;
7296 def REV64_8h : NeonI_REV<"rev64", "8h", 0b01, 0b1, 0b0, 0b00000, VPR128,
7297                          v8i16, Neon_rev64>;
7298 def REV64_4s : NeonI_REV<"rev64", "4s", 0b10, 0b1, 0b0, 0b00000, VPR128,
7299                          v4i32, Neon_rev64>;
7300 def REV64_8b : NeonI_REV<"rev64", "8b", 0b00, 0b0, 0b0, 0b00000, VPR64,
7301                          v8i8, Neon_rev64>;
7302 def REV64_4h : NeonI_REV<"rev64", "4h", 0b01, 0b0, 0b0, 0b00000, VPR64,
7303                          v4i16, Neon_rev64>;
7304 def REV64_2s : NeonI_REV<"rev64", "2s", 0b10, 0b0, 0b0, 0b00000, VPR64,
7305                          v2i32, Neon_rev64>;
7306
7307 def : Pat<(v4f32 (Neon_rev64 (v4f32 VPR128:$Rn))), (REV64_4s VPR128:$Rn)>;
7308 def : Pat<(v2f32 (Neon_rev64 (v2f32 VPR64:$Rn))), (REV64_2s VPR64:$Rn)>;
7309
7310 def REV32_16b : NeonI_REV<"rev32", "16b", 0b00, 0b1, 0b1, 0b00000, VPR128,
7311                           v16i8, Neon_rev32>;
7312 def REV32_8h : NeonI_REV<"rev32", "8h", 0b01, 0b1, 0b1, 0b00000, VPR128,
7313                           v8i16, Neon_rev32>;
7314 def REV32_8b : NeonI_REV<"rev32", "8b", 0b00, 0b0, 0b1, 0b00000, VPR64,
7315                          v8i8, Neon_rev32>;
7316 def REV32_4h : NeonI_REV<"rev32", "4h", 0b01, 0b0, 0b1, 0b00000, VPR64,
7317                          v4i16, Neon_rev32>;
7318
7319 def REV16_16b : NeonI_REV<"rev16", "16b", 0b00, 0b1, 0b0, 0b00001, VPR128,
7320                           v16i8, Neon_rev16>;
7321 def REV16_8b : NeonI_REV<"rev16", "8b", 0b00, 0b0, 0b0, 0b00001, VPR64,
7322                          v8i8, Neon_rev16>;
7323
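// Pairwise add long (SADDLP/UADDLP): add adjacent element pairs, producing
// elements of twice the width.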
7324 multiclass NeonI_PairwiseAdd<string asmop, bit U, bits<5> opcode,
7325                              SDPatternOperator Neon_Padd> {
7326   def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
7327                            (outs VPR128:$Rd), (ins VPR128:$Rn),
7328                            asmop # "\t$Rd.8h, $Rn.16b",
7329                            [(set (v8i16 VPR128:$Rd),
7330                               (v8i16 (Neon_Padd (v16i8 VPR128:$Rn))))],
7331                            NoItinerary>;
7332   
7333   def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
7334                           (outs VPR64:$Rd), (ins VPR64:$Rn),
7335                           asmop # "\t$Rd.4h, $Rn.8b",
7336                           [(set (v4i16 VPR64:$Rd),
7337                              (v4i16 (Neon_Padd (v8i8 VPR64:$Rn))))],
7338                           NoItinerary>;
7339   
7340   def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
7341                            (outs VPR128:$Rd), (ins VPR128:$Rn),
7342                            asmop # "\t$Rd.4s, $Rn.8h",
7343                            [(set (v4i32 VPR128:$Rd),
7344                               (v4i32 (Neon_Padd (v8i16 VPR128:$Rn))))],
7345                            NoItinerary>;
7346   
7347   def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
7348                           (outs VPR64:$Rd), (ins VPR64:$Rn),
7349                           asmop # "\t$Rd.2s, $Rn.4h",
7350                           [(set (v2i32 VPR64:$Rd),
7351                              (v2i32 (Neon_Padd (v4i16 VPR64:$Rn))))],
7352                           NoItinerary>;
7353   
7354   def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
7355                            (outs VPR128:$Rd), (ins VPR128:$Rn),
7356                            asmop # "\t$Rd.2d, $Rn.4s",
7357                            [(set (v2i64 VPR128:$Rd),
7358                               (v2i64 (Neon_Padd (v4i32 VPR128:$Rn))))],
7359                            NoItinerary>;
7360   
7361   def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
7362                           (outs VPR64:$Rd), (ins VPR64:$Rn),
7363                           asmop # "\t$Rd.1d, $Rn.2s",
7364                           [(set (v1i64 VPR64:$Rd),
7365                              (v1i64 (Neon_Padd (v2i32 VPR64:$Rn))))],
7366                           NoItinerary>;
7367 }
7368
7369 defm SADDLP : NeonI_PairwiseAdd<"saddlp", 0b0, 0b00010,
7370                                 int_arm_neon_vpaddls>;
7371 defm UADDLP : NeonI_PairwiseAdd<"uaddlp", 0b1, 0b00010,
7372                                 int_arm_neon_vpaddlu>;
7373
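// Pairwise add long and accumulate (SADALP/UADALP): as above, but the result
// is accumulated into the destination, so $src is tied to $Rd.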
7374 multiclass NeonI_PairwiseAddAcc<string asmop, bit U, bits<5> opcode,
7375                              SDPatternOperator Neon_Padd> {
7376   let Constraints = "$src = $Rd" in {
7377     def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
7378                              (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7379                              asmop # "\t$Rd.8h, $Rn.16b",
7380                              [(set (v8i16 VPR128:$Rd),
7381                                 (v8i16 (Neon_Padd 
7382                                   (v8i16 VPR128:$src), (v16i8 VPR128:$Rn))))],
7383                              NoItinerary>;
7384     
7385     def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
7386                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
7387                             asmop # "\t$Rd.4h, $Rn.8b",
7388                             [(set (v4i16 VPR64:$Rd),
7389                                (v4i16 (Neon_Padd 
7390                                  (v4i16 VPR64:$src), (v8i8 VPR64:$Rn))))],
7391                             NoItinerary>;
7392     
7393     def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
7394                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7395                             asmop # "\t$Rd.4s, $Rn.8h",
7396                             [(set (v4i32 VPR128:$Rd),
7397                                (v4i32 (Neon_Padd
7398                                  (v4i32 VPR128:$src), (v8i16 VPR128:$Rn))))],
7399                             NoItinerary>;
7400     
7401     def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
7402                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
7403                             asmop # "\t$Rd.2s, $Rn.4h",
7404                             [(set (v2i32 VPR64:$Rd),
7405                                (v2i32 (Neon_Padd
7406                                  (v2i32 VPR64:$src), (v4i16 VPR64:$Rn))))],
7407                             NoItinerary>;
7408     
7409     def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
7410                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7411                             asmop # "\t$Rd.2d, $Rn.4s",
7412                             [(set (v2i64 VPR128:$Rd),
7413                                (v2i64 (Neon_Padd
7414                                  (v2i64 VPR128:$src), (v4i32 VPR128:$Rn))))],
7415                             NoItinerary>;
7416     
7417     def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
7418                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
7419                             asmop # "\t$Rd.1d, $Rn.2s",
7420                             [(set (v1i64 VPR64:$Rd),
7421                                (v1i64 (Neon_Padd
7422                                  (v1i64 VPR64:$src), (v2i32 VPR64:$Rn))))],
7423                             NoItinerary>;
7424   }
7425 }
7426
7427 defm SADALP : NeonI_PairwiseAddAcc<"sadalp", 0b0, 0b00110,
7428                                    int_arm_neon_vpadals>;
7429 defm UADALP : NeonI_PairwiseAddAcc<"uadalp", 0b1, 0b00110,
7430                                    int_arm_neon_vpadalu>;
7431
7432 multiclass NeonI_2VMisc_BHSDsize_1Arg<string asmop, bit U, bits<5> opcode> {
7433   def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
7434                          (outs VPR128:$Rd), (ins VPR128:$Rn),
7435                          asmop # "\t$Rd.16b, $Rn.16b",
7436                          [], NoItinerary>;
7437   
7438   def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
7439                         (outs VPR128:$Rd), (ins VPR128:$Rn),
7440                         asmop # "\t$Rd.8h, $Rn.8h",
7441                         [], NoItinerary>;
7442   
7443   def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
7444                         (outs VPR128:$Rd), (ins VPR128:$Rn),
7445                         asmop # "\t$Rd.4s, $Rn.4s",
7446                         [], NoItinerary>;
7447   
7448   def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
7449                         (outs VPR128:$Rd), (ins VPR128:$Rn),
7450                         asmop # "\t$Rd.2d, $Rn.2d",
7451                         [], NoItinerary>;
7452   
7453   def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
7454                          (outs VPR64:$Rd), (ins VPR64:$Rn),
7455                          asmop # "\t$Rd.8b, $Rn.8b",
7456                          [], NoItinerary>;
7457   
7458   def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
7459                         (outs VPR64:$Rd), (ins VPR64:$Rn),
7460                         asmop # "\t$Rd.4h, $Rn.4h",
7461                         [], NoItinerary>;
7462   
7463   def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
7464                         (outs VPR64:$Rd), (ins VPR64:$Rn),
7465                         asmop # "\t$Rd.2s, $Rn.2s",
7466                         [], NoItinerary>;
7467 }
7468
7469 defm SQABS : NeonI_2VMisc_BHSDsize_1Arg<"sqabs", 0b0, 0b00111>;
7470 defm SQNEG : NeonI_2VMisc_BHSDsize_1Arg<"sqneg", 0b1, 0b00111>;
7471 defm ABS : NeonI_2VMisc_BHSDsize_1Arg<"abs", 0b0, 0b01011>;
7472 defm NEG : NeonI_2VMisc_BHSDsize_1Arg<"neg", 0b1, 0b01011>;
7473
7474 multiclass NeonI_2VMisc_BHSD_1Arg_Pattern<string Prefix,
7475                                           SDPatternOperator Neon_Op> {
7476   def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$Rn))),
7477             (v16i8 (!cast<Instruction>(Prefix # 16b) (v16i8 VPR128:$Rn)))>;
7478
7479   def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$Rn))),
7480             (v8i16 (!cast<Instruction>(Prefix # 8h) (v8i16 VPR128:$Rn)))>;
7481
7482   def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$Rn))),
7483             (v4i32 (!cast<Instruction>(Prefix # 4s) (v4i32 VPR128:$Rn)))>;
7484
7485   def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$Rn))),
7486             (v2i64 (!cast<Instruction>(Prefix # 2d) (v2i64 VPR128:$Rn)))>;
7487
7488   def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$Rn))),
7489             (v8i8 (!cast<Instruction>(Prefix # 8b) (v8i8 VPR64:$Rn)))>;
7490
7491   def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$Rn))),
7492             (v4i16 (!cast<Instruction>(Prefix # 4h) (v4i16 VPR64:$Rn)))>;
7493
7494   def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$Rn))),
7495             (v2i32 (!cast<Instruction>(Prefix # 2s) (v2i32 VPR64:$Rn)))>;
7496 }
7497
7498 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQABS", int_arm_neon_vqabs>;
7499 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQNEG", int_arm_neon_vqneg>;
7500 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"ABS", int_arm_neon_vabs>;
7501
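// A vector negation written as a subtraction from zero selects NEG.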
7502 def : Pat<(v16i8 (sub 
7503             (v16i8 Neon_AllZero),
7504             (v16i8 VPR128:$Rn))),
7505           (v16i8 (NEG16b (v16i8 VPR128:$Rn)))>;
7506 def : Pat<(v8i8 (sub 
7507             (v8i8 Neon_AllZero),
7508             (v8i8 VPR64:$Rn))),
7509           (v8i8 (NEG8b (v8i8 VPR64:$Rn)))>;
7510 def : Pat<(v8i16 (sub 
7511             (v8i16 (bitconvert (v16i8 Neon_AllZero))),
7512             (v8i16 VPR128:$Rn))),
7513           (v8i16 (NEG8h (v8i16 VPR128:$Rn)))>;
7514 def : Pat<(v4i16 (sub 
7515             (v4i16 (bitconvert (v8i8 Neon_AllZero))),
7516             (v4i16 VPR64:$Rn))),
7517           (v4i16 (NEG4h (v4i16 VPR64:$Rn)))>;
7518 def : Pat<(v4i32 (sub 
7519             (v4i32 (bitconvert (v16i8 Neon_AllZero))),
7520             (v4i32 VPR128:$Rn))),
7521           (v4i32 (NEG4s (v4i32 VPR128:$Rn)))>;
7522 def : Pat<(v2i32 (sub 
7523             (v2i32 (bitconvert (v8i8 Neon_AllZero))),
7524             (v2i32 VPR64:$Rn))),
7525           (v2i32 (NEG2s (v2i32 VPR64:$Rn)))>;
7526 def : Pat<(v2i64 (sub 
7527             (v2i64 (bitconvert (v16i8 Neon_AllZero))),
7528             (v2i64 VPR128:$Rn))),
7529           (v2i64 (NEG2d (v2i64 VPR128:$Rn)))>;
7530
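// SUQADD/USQADD read and write the destination, so $src is tied to $Rd.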
7531 multiclass NeonI_2VMisc_BHSDsize_2Args<string asmop, bit U, bits<5> opcode> {
7532   let Constraints = "$src = $Rd" in {
7533     def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
7534                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7535                            asmop # "\t$Rd.16b, $Rn.16b",
7536                            [], NoItinerary>;
7537     
7538     def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
7539                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7540                           asmop # "\t$Rd.8h, $Rn.8h",
7541                           [], NoItinerary>;
7542     
7543     def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
7544                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7545                           asmop # "\t$Rd.4s, $Rn.4s",
7546                           [], NoItinerary>;
7547     
7548     def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
7549                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7550                           asmop # "\t$Rd.2d, $Rn.2d",
7551                           [], NoItinerary>;
7552     
7553     def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
7554                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
7555                           asmop # "\t$Rd.8b, $Rn.8b",
7556                           [], NoItinerary>;
7557     
7558     def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
7559                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
7560                           asmop # "\t$Rd.4h, $Rn.4h",
7561                           [], NoItinerary>;
7562     
7563     def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
7564                           (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
7565                           asmop # "\t$Rd.2s, $Rn.2s",
7566                           [], NoItinerary>;
7567   }
7568 }
7569
7570 defm SUQADD : NeonI_2VMisc_BHSDsize_2Args<"suqadd", 0b0, 0b00011>;
7571 defm USQADD : NeonI_2VMisc_BHSDsize_2Args<"usqadd", 0b1, 0b00011>;
7572
7573 multiclass NeonI_2VMisc_BHSD_2Args_Pattern<string Prefix,
7574                                            SDPatternOperator Neon_Op> {
7575   def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$src), (v16i8 VPR128:$Rn))),
7576             (v16i8 (!cast<Instruction>(Prefix # 16b)
7577               (v16i8 VPR128:$src), (v16i8 VPR128:$Rn)))>;
7578
7579   def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$src), (v8i16 VPR128:$Rn))),
7580             (v8i16 (!cast<Instruction>(Prefix # 8h)
7581               (v8i16 VPR128:$src), (v8i16 VPR128:$Rn)))>;
7582
7583   def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$src), (v4i32 VPR128:$Rn))),
7584             (v4i32 (!cast<Instruction>(Prefix # 4s)
7585               (v4i32 VPR128:$src), (v4i32 VPR128:$Rn)))>;
7586
7587   def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$src), (v2i64 VPR128:$Rn))),
7588             (v2i64 (!cast<Instruction>(Prefix # 2d)
7589               (v2i64 VPR128:$src), (v2i64 VPR128:$Rn)))>;
7590
7591   def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$src), (v8i8 VPR64:$Rn))),
7592             (v8i8 (!cast<Instruction>(Prefix # 8b)
7593               (v8i8 VPR64:$src), (v8i8 VPR64:$Rn)))>;
7594
7595   def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$src), (v4i16 VPR64:$Rn))),
7596             (v4i16 (!cast<Instruction>(Prefix # 4h)
7597               (v4i16 VPR64:$src), (v4i16 VPR64:$Rn)))>;
7598
7599   def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$src), (v2i32 VPR64:$Rn))),
7600             (v2i32 (!cast<Instruction>(Prefix # 2s)
7601               (v2i32 VPR64:$src), (v2i32 VPR64:$Rn)))>;
7602 }
7603
7604 defm : NeonI_2VMisc_BHSD_2Args_Pattern<"SUQADD", int_aarch64_neon_suqadd>;
7605 defm : NeonI_2VMisc_BHSD_2Args_Pattern<"USQADD", int_aarch64_neon_usqadd>;
7606
7607 multiclass NeonI_2VMisc_BHSsizes<string asmop, bit U,
7608                           SDPatternOperator Neon_Op> {
7609   def 16b : NeonI_2VMisc<0b1, U, 0b00, 0b00100,
7610                          (outs VPR128:$Rd), (ins VPR128:$Rn),
7611                          asmop # "\t$Rd.16b, $Rn.16b",
7612                          [(set (v16i8 VPR128:$Rd),
7613                             (v16i8 (Neon_Op (v16i8 VPR128:$Rn))))],
7614                          NoItinerary>;
7615   
7616   def 8h : NeonI_2VMisc<0b1, U, 0b01, 0b00100,
7617                         (outs VPR128:$Rd), (ins VPR128:$Rn),
7618                         asmop # "\t$Rd.8h, $Rn.8h",
7619                         [(set (v8i16 VPR128:$Rd),
7620                            (v8i16 (Neon_Op (v8i16 VPR128:$Rn))))],
7621                         NoItinerary>;
7622   
7623   def 4s : NeonI_2VMisc<0b1, U, 0b10, 0b00100,
7624                         (outs VPR128:$Rd), (ins VPR128:$Rn),
7625                         asmop # "\t$Rd.4s, $Rn.4s",
7626                         [(set (v4i32 VPR128:$Rd),
7627                            (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
7628                         NoItinerary>;
7629   
7630   def 8b : NeonI_2VMisc<0b0, U, 0b00, 0b00100,
7631                         (outs VPR64:$Rd), (ins VPR64:$Rn),
7632                         asmop # "\t$Rd.8b, $Rn.8b",
7633                         [(set (v8i8 VPR64:$Rd),
7634                            (v8i8 (Neon_Op (v8i8 VPR64:$Rn))))],
7635                         NoItinerary>;
7636   
7637   def 4h : NeonI_2VMisc<0b0, U, 0b01, 0b00100,
7638                         (outs VPR64:$Rd), (ins VPR64:$Rn),
7639                         asmop # "\t$Rd.4h, $Rn.4h",
7640                         [(set (v4i16 VPR64:$Rd),
7641                            (v4i16 (Neon_Op (v4i16 VPR64:$Rn))))],
7642                         NoItinerary>;
7643   
7644   def 2s : NeonI_2VMisc<0b0, U, 0b10, 0b00100,
7645                         (outs VPR64:$Rd), (ins VPR64:$Rn),
7646                         asmop # "\t$Rd.2s, $Rn.2s",
7647                         [(set (v2i32 VPR64:$Rd),
7648                            (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
7649                         NoItinerary>;
7650 }
7651
7652 defm CLS : NeonI_2VMisc_BHSsizes<"cls", 0b0, int_arm_neon_vcls>;
7653 defm CLZ : NeonI_2VMisc_BHSsizes<"clz", 0b1, ctlz>;
7654
7655 multiclass NeonI_2VMisc_Bsize<string asmop, bit U, bits<2> size,
7656                               bits<5> Opcode> {
7657   def 16b : NeonI_2VMisc<0b1, U, size, Opcode,
7658                          (outs VPR128:$Rd), (ins VPR128:$Rn),
7659                          asmop # "\t$Rd.16b, $Rn.16b",
7660                          [], NoItinerary>;
7661   
7662   def 8b : NeonI_2VMisc<0b0, U, size, Opcode,
7663                         (outs VPR64:$Rd), (ins VPR64:$Rn),
7664                         asmop # "\t$Rd.8b, $Rn.8b",
7665                         [], NoItinerary>;
7666 }
7667
7668 defm CNT : NeonI_2VMisc_Bsize<"cnt", 0b0, 0b00, 0b00101>;
7669 defm NOT : NeonI_2VMisc_Bsize<"not", 0b1, 0b00, 0b00101>;
7670 defm RBIT : NeonI_2VMisc_Bsize<"rbit", 0b1, 0b01, 0b00101>;
7671
7672 def : NeonInstAlias<"mvn $Rd.16b, $Rn.16b",
7673                     (NOT16b VPR128:$Rd, VPR128:$Rn), 0>;
7674 def : NeonInstAlias<"mvn $Rd.8b, $Rn.8b",
7675                     (NOT8b VPR64:$Rd, VPR64:$Rn), 0>;
7676
7677 def : Pat<(v16i8 (ctpop (v16i8 VPR128:$Rn))),
7678           (v16i8 (CNT16b (v16i8 VPR128:$Rn)))>;
7679 def : Pat<(v8i8 (ctpop (v8i8 VPR64:$Rn))),
7680           (v8i8 (CNT8b (v8i8 VPR64:$Rn)))>;
7681
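// A bitwise NOT expressed as XOR with an all-ones vector selects NOT,
// whatever the nominal element type.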
7682 def : Pat<(v16i8 (xor 
7683             (v16i8 VPR128:$Rn),
7684             (v16i8 Neon_AllOne))),
7685           (v16i8 (NOT16b (v16i8 VPR128:$Rn)))>;
7686 def : Pat<(v8i8 (xor 
7687             (v8i8 VPR64:$Rn),
7688             (v8i8 Neon_AllOne))),
7689           (v8i8 (NOT8b (v8i8 VPR64:$Rn)))>;
7690 def : Pat<(v8i16 (xor 
7691             (v8i16 VPR128:$Rn),
7692             (v8i16 (bitconvert (v16i8 Neon_AllOne))))),
7693           (NOT16b VPR128:$Rn)>;
7694 def : Pat<(v4i16 (xor 
7695             (v4i16 VPR64:$Rn),
7696             (v4i16 (bitconvert (v8i8 Neon_AllOne))))),
7697           (NOT8b VPR64:$Rn)>;
7698 def : Pat<(v4i32 (xor 
7699             (v4i32 VPR128:$Rn),
7700             (v4i32 (bitconvert (v16i8 Neon_AllOne))))),
7701           (NOT16b VPR128:$Rn)>;
7702 def : Pat<(v2i32 (xor 
7703             (v2i32 VPR64:$Rn),
7704             (v2i32 (bitconvert (v8i8 Neon_AllOne))))),
7705           (NOT8b VPR64:$Rn)>;
7706 def : Pat<(v2i64 (xor 
7707             (v2i64 VPR128:$Rn),
7708             (v2i64 (bitconvert (v16i8 Neon_AllOne))))),
7709           (NOT16b VPR128:$Rn)>;
7710
7711 def : Pat<(v16i8 (int_aarch64_neon_rbit (v16i8 VPR128:$Rn))),
7712           (v16i8 (RBIT16b (v16i8 VPR128:$Rn)))>;
7713 def : Pat<(v8i8 (int_aarch64_neon_rbit (v8i8 VPR64:$Rn))),
7714           (v8i8 (RBIT8b (v8i8 VPR64:$Rn)))>;
7715
7716 multiclass NeonI_2VMisc_SDsizes<string asmop, bit U, bits<5> opcode,
7717                                 SDPatternOperator Neon_Op> {
7718   def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
7719                         (outs VPR128:$Rd), (ins VPR128:$Rn),
7720                         asmop # "\t$Rd.4s, $Rn.4s",
7721                         [(set (v4f32 VPR128:$Rd),
7722                            (v4f32 (Neon_Op (v4f32 VPR128:$Rn))))],
7723                         NoItinerary>;
7724   
7725   def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
7726                         (outs VPR128:$Rd), (ins VPR128:$Rn),
7727                         asmop # "\t$Rd.2d, $Rn.2d",
7728                         [(set (v2f64 VPR128:$Rd),
7729                            (v2f64 (Neon_Op (v2f64 VPR128:$Rn))))],
7730                         NoItinerary>;
7731   
7732   def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
7733                         (outs VPR64:$Rd), (ins VPR64:$Rn),
7734                         asmop # "\t$Rd.2s, $Rn.2s",
7735                         [(set (v2f32 VPR64:$Rd),
7736                            (v2f32 (Neon_Op (v2f32 VPR64:$Rn))))],
7737                         NoItinerary>;
7738 }
7739
7740 defm FABS : NeonI_2VMisc_SDsizes<"fabs", 0b0, 0b01111, fabs>;
7741 defm FNEG : NeonI_2VMisc_SDsizes<"fneg", 0b1, 0b01111, fneg>;
7742
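// Narrowing moves (XTN, SQXTUN, SQXTN, UQXTN): the "2" variants write only
// the high half of the destination, so the source operand is tied to $Rd.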
7743 multiclass NeonI_2VMisc_HSD_Narrow<string asmop, bit U, bits<5> opcode> {
7744   def 8h8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
7745                           (outs VPR64:$Rd), (ins VPR128:$Rn),
7746                           asmop # "\t$Rd.8b, $Rn.8h",
7747                           [], NoItinerary>;
7748
7749   def 4s4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
7750                           (outs VPR64:$Rd), (ins VPR128:$Rn),
7751                           asmop # "\t$Rd.4h, $Rn.4s",
7752                           [], NoItinerary>;
7753
7754   def 2d2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
7755                           (outs VPR64:$Rd), (ins VPR128:$Rn),
7756                           asmop # "\t$Rd.2s, $Rn.2d",
7757                           [], NoItinerary>;
7758
7759   let Constraints = "$Rd = $src" in {
7760     def 8h16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
7761                              (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7762                              asmop # "2\t$Rd.16b, $Rn.8h",
7763                              [], NoItinerary>;
7764   
7765     def 4s8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
7766                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7767                             asmop # "2\t$Rd.8h, $Rn.4s",
7768                             [], NoItinerary>;
7769   
7770     def 2d4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
7771                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7772                             asmop # "2\t$Rd.4s, $Rn.2d",
7773                             [], NoItinerary>;
7774   }
7775 }
7776
7777 defm XTN : NeonI_2VMisc_HSD_Narrow<"xtn", 0b0, 0b10010>;
7778 defm SQXTUN : NeonI_2VMisc_HSD_Narrow<"sqxtun", 0b1, 0b10010>;
7779 defm SQXTN : NeonI_2VMisc_HSD_Narrow<"sqxtn", 0b0, 0b10100>;
7780 defm UQXTN : NeonI_2VMisc_HSD_Narrow<"uqxtn", 0b1, 0b10100>;
7781
7782 multiclass NeonI_2VMisc_Narrow_Patterns<string Prefix, 
7783                                         SDPatternOperator Neon_Op> {
7784   def : Pat<(v8i8 (Neon_Op (v8i16 VPR128:$Rn))),
7785             (v8i8 (!cast<Instruction>(Prefix # 8h8b) (v8i16 VPR128:$Rn)))>;
7786
7787   def : Pat<(v4i16 (Neon_Op (v4i32 VPR128:$Rn))),
7788             (v4i16 (!cast<Instruction>(Prefix # 4s4h) (v4i32 VPR128:$Rn)))>;
7789
7790   def : Pat<(v2i32 (Neon_Op (v2i64 VPR128:$Rn))),
7791             (v2i32 (!cast<Instruction>(Prefix # 2d2s) (v2i64 VPR128:$Rn)))>;
7792   
7793   def : Pat<(v16i8 (concat_vectors
7794               (v8i8 VPR64:$src),
7795               (v8i8 (Neon_Op (v8i16 VPR128:$Rn))))),
7796             (!cast<Instruction>(Prefix # 8h16b) 
7797               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
7798               VPR128:$Rn)>;
7799
7800   def : Pat<(v8i16 (concat_vectors
7801               (v4i16 VPR64:$src),
7802               (v4i16 (Neon_Op (v4i32 VPR128:$Rn))))),
7803             (!cast<Instruction>(Prefix # 4s8h)
7804               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
7805               VPR128:$Rn)>;
7806
7807   def : Pat<(v4i32 (concat_vectors
7808               (v2i32 VPR64:$src),
7809               (v2i32 (Neon_Op (v2i64 VPR128:$Rn))))),
7810             (!cast<Instruction>(Prefix # 2d4s)
7811               (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
7812               VPR128:$Rn)>;
7813 }
7814
7815 defm : NeonI_2VMisc_Narrow_Patterns<"XTN", trunc>;
7816 defm : NeonI_2VMisc_Narrow_Patterns<"SQXTUN", int_arm_neon_vqmovnsu>;
7817 defm : NeonI_2VMisc_Narrow_Patterns<"SQXTN", int_arm_neon_vqmovns>;
7818 defm : NeonI_2VMisc_Narrow_Patterns<"UQXTN", int_arm_neon_vqmovnu>;
7819
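// SHLL/SHLL2: shift each element left by exactly the element width, widening
// the result.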
7820 multiclass NeonI_2VMisc_SHIFT<string asmop, bit U, bits<5> opcode> {
7821   def 8b8h : NeonI_2VMisc<0b0, U, 0b00, opcode,
7822                           (outs VPR128:$Rd),
7823                           (ins VPR64:$Rn, uimm_exact8:$Imm),
7824                           asmop # "\t$Rd.8h, $Rn.8b, $Imm",
7825                           [], NoItinerary>;
7826
7827   def 4h4s : NeonI_2VMisc<0b0, U, 0b01, opcode,
7828                           (outs VPR128:$Rd),
7829                           (ins VPR64:$Rn, uimm_exact16:$Imm),
7830                           asmop # "\t$Rd.4s, $Rn.4h, $Imm",
7831                           [], NoItinerary>;
7832
7833   def 2s2d : NeonI_2VMisc<0b0, U, 0b10, opcode,
7834                           (outs VPR128:$Rd),
7835                           (ins VPR64:$Rn, uimm_exact32:$Imm),
7836                           asmop # "\t$Rd.2d, $Rn.2s, $Imm",
7837                           [], NoItinerary>;
7838
7839   def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
7840                           (outs VPR128:$Rd),
7841                           (ins VPR128:$Rn, uimm_exact8:$Imm),
7842                           asmop # "2\t$Rd.8h, $Rn.16b, $Imm",
7843                           [], NoItinerary>;
7844
7845   def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
7846                           (outs VPR128:$Rd),
7847                           (ins VPR128:$Rn, uimm_exact16:$Imm),
7848                           asmop # "2\t$Rd.4s, $Rn.8h, $Imm",
7849                           [], NoItinerary>;
7850
7851   def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
7852                           (outs VPR128:$Rd),
7853                           (ins VPR128:$Rn, uimm_exact32:$Imm),
7854                           asmop # "2\t$Rd.2d, $Rn.4s, $Imm",
7855                           [], NoItinerary>;
7856 }
7857
7858 defm SHLL : NeonI_2VMisc_SHIFT<"shll", 0b1, 0b10011>;
7859
7860 class NeonI_SHLL_Patterns<ValueType OpTy, ValueType DesTy,
7861                           SDPatternOperator ExtOp, Operand Neon_Imm,
7862                           string suffix> 
7863   : Pat<(DesTy (shl
7864           (DesTy (ExtOp (OpTy VPR64:$Rn))),
7865             (DesTy (Neon_vdup
7866               (i32 Neon_Imm:$Imm))))),
7867         (!cast<Instruction>("SHLL" # suffix) VPR64:$Rn, Neon_Imm:$Imm)>;
7868     
7869 class NeonI_SHLL_High_Patterns<ValueType OpTy, ValueType DesTy,
7870                                SDPatternOperator ExtOp, Operand Neon_Imm,
7871                                string suffix, PatFrag GetHigh> 
7872   : Pat<(DesTy (shl
7873           (DesTy (ExtOp
7874             (OpTy (GetHigh VPR128:$Rn)))),
7875               (DesTy (Neon_vdup
7876                 (i32 Neon_Imm:$Imm))))),
7877         (!cast<Instruction>("SHLL" # suffix) VPR128:$Rn, Neon_Imm:$Imm)>;
7878
7879 def : NeonI_SHLL_Patterns<v8i8, v8i16, zext, uimm_exact8, "8b8h">;
7880 def : NeonI_SHLL_Patterns<v8i8, v8i16, sext, uimm_exact8, "8b8h">;
7881 def : NeonI_SHLL_Patterns<v4i16, v4i32, zext, uimm_exact16, "4h4s">;
7882 def : NeonI_SHLL_Patterns<v4i16, v4i32, sext, uimm_exact16, "4h4s">;
7883 def : NeonI_SHLL_Patterns<v2i32, v2i64, zext, uimm_exact32, "2s2d">;
7884 def : NeonI_SHLL_Patterns<v2i32, v2i64, sext, uimm_exact32, "2s2d">;
7885 def : NeonI_SHLL_High_Patterns<v8i8, v8i16, zext, uimm_exact8, "16b8h",
7886                                Neon_High16B>;
7887 def : NeonI_SHLL_High_Patterns<v8i8, v8i16, sext, uimm_exact8, "16b8h",
7888                                Neon_High16B>;
7889 def : NeonI_SHLL_High_Patterns<v4i16, v4i32, zext, uimm_exact16, "8h4s",
7890                                Neon_High8H>;
7891 def : NeonI_SHLL_High_Patterns<v4i16, v4i32, sext, uimm_exact16, "8h4s",
7892                                Neon_High8H>;
7893 def : NeonI_SHLL_High_Patterns<v2i32, v2i64, zext, uimm_exact32, "4s2d",
7894                                Neon_High4S>;
7895 def : NeonI_SHLL_High_Patterns<v2i32, v2i64, sext, uimm_exact32, "4s2d",
7896                                Neon_High4S>;
7897
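// FCVTN/FCVTN2: floating-point convert to narrower precision (f32 to f16,
// f64 to f32); the "2" variants write the high half of the destination.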
7898 multiclass NeonI_2VMisc_SD_Narrow<string asmop, bit U, bits<5> opcode> {
7899   def 4s4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
7900                           (outs VPR64:$Rd), (ins VPR128:$Rn),
7901                           asmop # "\t$Rd.4h, $Rn.4s",
7902                           [], NoItinerary>;
7903
7904   def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
7905                           (outs VPR64:$Rd), (ins VPR128:$Rn),
7906                           asmop # "\t$Rd.2s, $Rn.2d",
7907                           [], NoItinerary>;
7908   
7909   let Constraints = "$src = $Rd" in {
7910     def 4s8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
7911                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7912                             asmop # "2\t$Rd.8h, $Rn.4s",
7913                             [], NoItinerary>;
7914   
7915     def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
7916                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7917                             asmop # "2\t$Rd.4s, $Rn.2d",
7918                             [], NoItinerary>;
7919   }
7920 }
7921
7922 defm FCVTN : NeonI_2VMisc_SD_Narrow<"fcvtn", 0b0, 0b10110>;
7923
7924 multiclass NeonI_2VMisc_Narrow_Pattern<string prefix,
7925                                        SDPatternOperator f32_to_f16_Op,
7926                                        SDPatternOperator f64_to_f32_Op> {
7927   
7928   def : Pat<(v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))),
7929               (!cast<Instruction>(prefix # "4s4h") (v4f32 VPR128:$Rn))>;
7930   
7931   def : Pat<(v8i16 (concat_vectors
7932                 (v4i16 VPR64:$src),
7933                 (v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))))),
7934                   (!cast<Instruction>(prefix # "4s8h")
7935                     (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
7936                     (v4f32 VPR128:$Rn))>;  
7937     
7938   def : Pat<(v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))),
7939             (!cast<Instruction>(prefix # "2d2s") (v2f64 VPR128:$Rn))>;
7940   
7941   def : Pat<(v4f32 (concat_vectors
7942               (v2f32 VPR64:$src),
7943               (v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))))),
7944                 (!cast<Instruction>(prefix # "2d4s")
7945                   (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
7946                   (v2f64 VPR128:$Rn))>;
7947 }
7948
7949 defm : NeonI_2VMisc_Narrow_Pattern<"FCVTN", int_arm_neon_vcvtfp2hf, fround>;
7950
7951 multiclass NeonI_2VMisc_D_Narrow<string asmop, string prefix, bit U,
7952                                  bits<5> opcode> {
7953   def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
7954                           (outs VPR64:$Rd), (ins VPR128:$Rn),
7955                           asmop # "\t$Rd.2s, $Rn.2d",
7956                           [], NoItinerary>;
7957
7958   def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
7959                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
7960                           asmop # "2\t$Rd.4s, $Rn.2d",
7961                           [], NoItinerary> {
7962     let Constraints = "$src = $Rd";
7963   }
7964   
7965   def : Pat<(v2f32 (int_aarch64_neon_fcvtxn (v2f64 VPR128:$Rn))),
7966             (!cast<Instruction>(prefix # "2d2s") VPR128:$Rn)>;
7967
7968   def : Pat<(v4f32 (concat_vectors
7969               (v2f32 VPR64:$src),
7970               (v2f32 (int_aarch64_neon_fcvtxn (v2f64 VPR128:$Rn))))),
7971             (!cast<Instruction>(prefix # "2d4s")
7972                (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
7973                VPR128:$Rn)>;
7974 }
7975
7976 defm FCVTXN : NeonI_2VMisc_D_Narrow<"fcvtxn", "FCVTXN", 0b1, 0b10110>;
7977
7978 def Neon_High4Float : PatFrag<(ops node:$in),
7979                               (extract_subvector (v4f32 node:$in), (iPTR 2))>;
7980
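// FCVTL/FCVTL2: floating-point convert to wider precision (f16 to f32,
// f32 to f64).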
7981 multiclass NeonI_2VMisc_HS_Extend<string asmop, bit U, bits<5> opcode> {
7982   def 4h4s : NeonI_2VMisc<0b0, U, 0b00, opcode,
7983                           (outs VPR128:$Rd), (ins VPR64:$Rn),
7984                           asmop # "\t$Rd.4s, $Rn.4h",
7985                           [], NoItinerary>;
7986
7987   def 2s2d : NeonI_2VMisc<0b0, U, 0b01, opcode,
7988                           (outs VPR128:$Rd), (ins VPR64:$Rn),
7989                           asmop # "\t$Rd.2d, $Rn.2s",
7990                           [], NoItinerary>;
7991
7992   def 8h4s : NeonI_2VMisc<0b1, U, 0b00, opcode,
7993                           (outs VPR128:$Rd), (ins VPR128:$Rn),
7994                           asmop # "2\t$Rd.4s, $Rn.8h",
7995                           [], NoItinerary>;
7996
7997   def 4s2d : NeonI_2VMisc<0b1, U, 0b01, opcode,
7998                           (outs VPR128:$Rd), (ins VPR128:$Rn),
7999                           asmop # "2\t$Rd.2d, $Rn.4s",
8000                           [], NoItinerary>;
8001 }
8002
8003 defm FCVTL : NeonI_2VMisc_HS_Extend<"fcvtl", 0b0, 0b10111>;
8004
8005 multiclass NeonI_2VMisc_Extend_Pattern<string prefix> {
8006   def : Pat<(v4f32 (int_arm_neon_vcvthf2fp (v4i16 VPR64:$Rn))),
8007             (!cast<Instruction>(prefix # "4h4s") VPR64:$Rn)>;
8008   
8009   def : Pat<(v4f32 (int_arm_neon_vcvthf2fp
8010               (v4i16 (Neon_High8H
8011                 (v8i16 VPR128:$Rn))))),
8012             (!cast<Instruction>(prefix # "8h4s") VPR128:$Rn)>;
8013   
8014   def : Pat<(v2f64 (fextend (v2f32 VPR64:$Rn))),
8015             (!cast<Instruction>(prefix # "2s2d") VPR64:$Rn)>;
8016   
8017   def : Pat<(v2f64 (fextend
8018               (v2f32 (Neon_High4Float
8019                 (v4f32 VPR128:$Rn))))),
8020             (!cast<Instruction>(prefix # "4s2d") VPR128:$Rn)>;
8021 }
8022
8023 defm : NeonI_2VMisc_Extend_Pattern<"FCVTL">;
8024
8025 multiclass NeonI_2VMisc_SD_Conv<string asmop, bit Size, bit U, bits<5> opcode,
8026                                 ValueType ResTy4s, ValueType OpTy4s,
8027                                 ValueType ResTy2d, ValueType OpTy2d,
8028                                 ValueType ResTy2s, ValueType OpTy2s,
8029                                 SDPatternOperator Neon_Op> {
8030   
8031   def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
8032                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8033                         asmop # "\t$Rd.4s, $Rn.4s",
8034                         [(set (ResTy4s VPR128:$Rd),
8035                            (ResTy4s (Neon_Op (OpTy4s VPR128:$Rn))))],
8036                         NoItinerary>;
8037
8038   def 2d : NeonI_2VMisc<0b1, U, {Size, 0b1}, opcode,
8039                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8040                         asmop # "\t$Rd.2d, $Rn.2d",
8041                         [(set (ResTy2d VPR128:$Rd),
8042                            (ResTy2d (Neon_Op (OpTy2d VPR128:$Rn))))],
8043                         NoItinerary>;
8044   
8045   def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
8046                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8047                         asmop # "\t$Rd.2s, $Rn.2s",
8048                         [(set (ResTy2s VPR64:$Rd),
8049                            (ResTy2s (Neon_Op (OpTy2s VPR64:$Rn))))],
8050                         NoItinerary>;
8051 }
8052
8053 multiclass NeonI_2VMisc_fp_to_int<string asmop, bit Size, bit U,
8054                                   bits<5> opcode, SDPatternOperator Neon_Op> {
8055   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4i32, v4f32, v2i64,
8056                                 v2f64, v2i32, v2f32, Neon_Op>;
8057 }
8058
8059 defm FCVTNS : NeonI_2VMisc_fp_to_int<"fcvtns", 0b0, 0b0, 0b11010,
8060                                      int_aarch64_neon_fcvtns>;
8061 defm FCVTNU : NeonI_2VMisc_fp_to_int<"fcvtnu", 0b0, 0b1, 0b11010,
8062                                      int_aarch64_neon_fcvtnu>;
8063 defm FCVTPS : NeonI_2VMisc_fp_to_int<"fcvtps", 0b1, 0b0, 0b11010,
8064                                      int_aarch64_neon_fcvtps>;
8065 defm FCVTPU : NeonI_2VMisc_fp_to_int<"fcvtpu", 0b1, 0b1, 0b11010,
8066                                      int_aarch64_neon_fcvtpu>;
8067 defm FCVTMS : NeonI_2VMisc_fp_to_int<"fcvtms", 0b0, 0b0, 0b11011,
8068                                      int_aarch64_neon_fcvtms>;
8069 defm FCVTMU : NeonI_2VMisc_fp_to_int<"fcvtmu", 0b0, 0b1, 0b11011,
8070                                      int_aarch64_neon_fcvtmu>;
8071 defm FCVTZS : NeonI_2VMisc_fp_to_int<"fcvtzs", 0b1, 0b0, 0b11011, fp_to_sint>;
8072 defm FCVTZU : NeonI_2VMisc_fp_to_int<"fcvtzu", 0b1, 0b1, 0b11011, fp_to_uint>;
8073 defm FCVTAS : NeonI_2VMisc_fp_to_int<"fcvtas", 0b0, 0b0, 0b11100,
8074                                      int_aarch64_neon_fcvtas>;
8075 defm FCVTAU : NeonI_2VMisc_fp_to_int<"fcvtau", 0b0, 0b1, 0b11100,
8076                                      int_aarch64_neon_fcvtau>;
8077
8078 multiclass NeonI_2VMisc_int_to_fp<string asmop, bit Size, bit U,
8079                                   bits<5> opcode, SDPatternOperator Neon_Op> {
8080   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4i32, v2f64,
8081                                 v2i64, v2f32, v2i32, Neon_Op>;
8082 }
8083
8084 defm SCVTF : NeonI_2VMisc_int_to_fp<"scvtf", 0b0, 0b0, 0b11101, sint_to_fp>;
8085 defm UCVTF : NeonI_2VMisc_int_to_fp<"ucvtf", 0b0, 0b1, 0b11101, uint_to_fp>;
8086
8087 multiclass NeonI_2VMisc_fp_to_fp<string asmop, bit Size, bit U,
8088                                  bits<5> opcode, SDPatternOperator Neon_Op> {
8089   defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4f32, v2f64,
8090                                 v2f64, v2f32, v2f32, Neon_Op>;
8091 }
8092
8093 defm FRINTN : NeonI_2VMisc_fp_to_fp<"frintn", 0b0, 0b0, 0b11000,
8094                                      int_aarch64_neon_frintn>;
8095 defm FRINTA : NeonI_2VMisc_fp_to_fp<"frinta", 0b0, 0b1, 0b11000, frnd>;
8096 defm FRINTP : NeonI_2VMisc_fp_to_fp<"frintp", 0b1, 0b0, 0b11000, fceil>;
8097 defm FRINTM : NeonI_2VMisc_fp_to_fp<"frintm", 0b0, 0b0, 0b11001, ffloor>;
8098 defm FRINTX : NeonI_2VMisc_fp_to_fp<"frintx", 0b0, 0b1, 0b11001, frint>;
8099 defm FRINTZ : NeonI_2VMisc_fp_to_fp<"frintz", 0b1, 0b0, 0b11001, ftrunc>;
8100 defm FRINTI : NeonI_2VMisc_fp_to_fp<"frinti", 0b1, 0b1, 0b11001, fnearbyint>;
8101 defm FRECPE : NeonI_2VMisc_fp_to_fp<"frecpe", 0b1, 0b0, 0b11101,
8102                                     int_arm_neon_vrecpe>;
8103 defm FRSQRTE : NeonI_2VMisc_fp_to_fp<"frsqrte", 0b1, 0b1, 0b11101,
8104                                      int_arm_neon_vrsqrte>;
8105 defm FSQRT : NeonI_2VMisc_fp_to_fp<"fsqrt", 0b1, 0b1, 0b11111,
8106                                    int_aarch64_neon_fsqrt>;
8107
8108 multiclass NeonI_2VMisc_S_Conv<string asmop, bit Size, bit U,
8109                                bits<5> opcode, SDPatternOperator Neon_Op> {
8110   def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
8111                         (outs VPR128:$Rd), (ins VPR128:$Rn),
8112                         asmop # "\t$Rd.4s, $Rn.4s",
8113                         [(set (v4i32 VPR128:$Rd),
8114                            (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
8115                         NoItinerary>;
8116   
8117   def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
8118                         (outs VPR64:$Rd), (ins VPR64:$Rn),
8119                         asmop # "\t$Rd.2s, $Rn.2s",
8120                         [(set (v2i32 VPR64:$Rd),
8121                            (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
8122                         NoItinerary>;
8123 }
8124
8125 defm URECPE : NeonI_2VMisc_S_Conv<"urecpe", 0b1, 0b0, 0b11100,
8126                                   int_arm_neon_vrecpe>;
8127 defm URSQRTE : NeonI_2VMisc_S_Conv<"ursqrte", 0b1, 0b1, 0b11100,
8128                                    int_arm_neon_vrsqrte>;
8129
8130 // Crypto Class
8131 class NeonI_Cryptoaes_2v<bits<2> size, bits<5> opcode,
8132                          string asmop, SDPatternOperator opnode>
8133   : NeonI_Crypto_AES<size, opcode,
8134                      (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8135                      asmop # "\t$Rd.16b, $Rn.16b",
8136                      [(set (v16i8 VPR128:$Rd),
8137                         (v16i8 (opnode (v16i8 VPR128:$src),
8138                                        (v16i8 VPR128:$Rn))))],
8139                      NoItinerary> {
8140   let Constraints = "$src = $Rd";
8141   let Predicates = [HasNEON, HasCrypto];
8142 }
8143
8144 def AESE : NeonI_Cryptoaes_2v<0b00, 0b00100, "aese", int_arm_neon_aese>;
8145 def AESD : NeonI_Cryptoaes_2v<0b00, 0b00101, "aesd", int_arm_neon_aesd>;
8146
8147 class NeonI_Cryptoaes<bits<2> size, bits<5> opcode,
8148                       string asmop, SDPatternOperator opnode>
8149   : NeonI_Crypto_AES<size, opcode,
8150                      (outs VPR128:$Rd), (ins VPR128:$Rn),
8151                      asmop # "\t$Rd.16b, $Rn.16b",
8152                      [(set (v16i8 VPR128:$Rd),
8153                         (v16i8 (opnode (v16i8 VPR128:$Rn))))],
8154                      NoItinerary>;
8155
8156 def AESMC : NeonI_Cryptoaes<0b00, 0b00110, "aesmc", int_arm_neon_aesmc>;
8157 def AESIMC : NeonI_Cryptoaes<0b00, 0b00111, "aesimc", int_arm_neon_aesimc>;
8158
8159 class NeonI_Cryptosha_vv<bits<2> size, bits<5> opcode,
8160                          string asmop, SDPatternOperator opnode>
8161   : NeonI_Crypto_SHA<size, opcode,
8162                      (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
8163                      asmop # "\t$Rd.4s, $Rn.4s",
8164                      [(set (v4i32 VPR128:$Rd),
8165                         (v4i32 (opnode (v4i32 VPR128:$src),
8166                                        (v4i32 VPR128:$Rn))))],
8167                      NoItinerary> {
8168   let Constraints = "$src = $Rd";
8169   let Predicates = [HasNEON, HasCrypto];
8170 }
8171
8172 def SHA1SU1 : NeonI_Cryptosha_vv<0b00, 0b00001, "sha1su1",
8173                                  int_arm_neon_sha1su1>;
8174 def SHA256SU0 : NeonI_Cryptosha_vv<0b00, 0b00010, "sha256su0",
8175                                    int_arm_neon_sha256su0>;
8176
8177 class NeonI_Cryptosha_ss<bits<2> size, bits<5> opcode,
8178                          string asmop, SDPatternOperator opnode>
8179   : NeonI_Crypto_SHA<size, opcode,
8180                      (outs FPR32:$Rd), (ins FPR32:$Rn),
8181                      asmop # "\t$Rd, $Rn",
8182                      [(set (v1i32 FPR32:$Rd),
8183                         (v1i32 (opnode (v1i32 FPR32:$Rn))))],
8184                      NoItinerary> {
8185   let Predicates = [HasNEON, HasCrypto];
8186 }
8187
8188 def SHA1H : NeonI_Cryptosha_ss<0b00, 0b00000, "sha1h", int_arm_neon_sha1h>;
8189
8190 class NeonI_Cryptosha3_vvv<bits<2> size, bits<3> opcode, string asmop,
8191                            SDPatternOperator opnode>
8192   : NeonI_Crypto_3VSHA<size, opcode,
8193                        (outs VPR128:$Rd),
8194                        (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
8195                        asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
8196                        [(set (v4i32 VPR128:$Rd),
8197                           (v4i32 (opnode (v4i32 VPR128:$src),
8198                                          (v4i32 VPR128:$Rn),
8199                                          (v4i32 VPR128:$Rm))))],
8200                        NoItinerary> {
8201   let Constraints = "$src = $Rd";
8202   let Predicates = [HasNEON, HasCrypto];
8203 }
8204
8205 def SHA1SU0 : NeonI_Cryptosha3_vvv<0b00, 0b011, "sha1su0",
8206                                    int_arm_neon_sha1su0>;
8207 def SHA256SU1 : NeonI_Cryptosha3_vvv<0b00, 0b110, "sha256su1",
8208                                      int_arm_neon_sha256su1>;
8209
8210 class NeonI_Cryptosha3_qqv<bits<2> size, bits<3> opcode, string asmop,
8211                            SDPatternOperator opnode>
8212   : NeonI_Crypto_3VSHA<size, opcode,
8213                        (outs FPR128:$Rd),
8214                        (ins FPR128:$src, FPR128:$Rn, VPR128:$Rm),
8215                        asmop # "\t$Rd, $Rn, $Rm.4s",
8216                        [(set (v4i32 FPR128:$Rd),
8217                           (v4i32 (opnode (v4i32 FPR128:$src),
8218                                          (v4i32 FPR128:$Rn),
8219                                          (v4i32 VPR128:$Rm))))],
8220                        NoItinerary> {
8221   let Constraints = "$src = $Rd";
8222   let Predicates = [HasNEON, HasCrypto];
8223 }
8224
8225 def SHA256H : NeonI_Cryptosha3_qqv<0b00, 0b100, "sha256h",
8226                                    int_arm_neon_sha256h>;
8227 def SHA256H2 : NeonI_Cryptosha3_qqv<0b00, 0b101, "sha256h2",
8228                                     int_arm_neon_sha256h2>;
8229
8230 class NeonI_Cryptosha3_qsv<bits<2> size, bits<3> opcode, string asmop,
8231                            SDPatternOperator opnode>
8232   : NeonI_Crypto_3VSHA<size, opcode,
8233                        (outs FPR128:$Rd),
8234                        (ins FPR128:$src, FPR32:$Rn, VPR128:$Rm),
8235                        asmop # "\t$Rd, $Rn, $Rm.4s",
8236                        [(set (v4i32 FPR128:$Rd),
8237                           (v4i32 (opnode (v4i32 FPR128:$src),
8238                                          (v1i32 FPR32:$Rn),
8239                                          (v4i32 VPR128:$Rm))))],
8240                        NoItinerary> {
8241   let Constraints = "$src = $Rd";
8242   let Predicates = [HasNEON, HasCrypto];
8243 }
8244
8245 def SHA1C : NeonI_Cryptosha3_qsv<0b00, 0b000, "sha1c", int_aarch64_neon_sha1c>;
8246 def SHA1P : NeonI_Cryptosha3_qsv<0b00, 0b001, "sha1p", int_aarch64_neon_sha1p>;
8247 def SHA1M : NeonI_Cryptosha3_qsv<0b00, 0b010, "sha1m", int_aarch64_neon_sha1m>;
8248