fb6d65450de6c60cd6d933c1b93c64076ffd984b
[oota-llvm.git] / lib / Target / AArch64 / AArch64InstrNEON.td
1 //===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the AArch64 NEON instruction set.
11 //
12 //===----------------------------------------------------------------------===//
13
14 //===----------------------------------------------------------------------===//
15 // NEON-specific DAG Nodes.
16 //===----------------------------------------------------------------------===//
// Bitwise select: one vector result and three vector operands, all of the
// same type (result, true-source, false-source share the mask's type).
17 def Neon_bsl       : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3,
18                       [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
19                       SDTCisSameAs<0, 3>]>>;
20
21 // (outs Result), (ins Imm, OpCmode)
// Shared profile for the modified-immediate move nodes below: a vector
// result from two i32 inputs (the raw immediate and its op/cmode encoding).
22 def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
23
// Vector move of a modified immediate (MOVI).
24 def Neon_movi     : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
25
// Vector move of the bitwise-inverted modified immediate (MVNI).
26 def Neon_mvni     : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
27
28 // (outs Result), (ins Imm)
// Floating-point vector immediate move (FMOV); the immediate arrives
// pre-encoded as an i32.
29 def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
30                         [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
31
32 // (outs Result), (ins LHS, RHS, CondCode)
// Element-wise compare producing an all-ones/all-zeros mask vector; the
// third operand is the CondCode (see Neon_cmeq etc. below).
33 def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
34                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
35
36 // (outs Result), (ins LHS, 0/0.0 constant, CondCode)
// Compare-against-zero form: operand 2 is the 0 / 0.0 constant, so only
// operand 1 is constrained to be a vector.
37 def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
38                  [SDTCisVec<0>,  SDTCisVec<1>]>>;
39
40 // (outs Result), (ins LHS, RHS)
// Bitwise test (CMTST): mask set where (LHS & RHS) is non-zero per element.
41 def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
42                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
43
// (outs Result), (ins Imm) — vector result from a single i32 immediate.
44 def Neon_dupImm : SDNode<"AArch64ISD::NEON_DUPIMM", SDTypeProfile<1, 1, 
45                     [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
46
47 //===----------------------------------------------------------------------===//
48 // Multiclasses
49 //===----------------------------------------------------------------------===//
50
// Byte-element "three registers, same type" multiclass: instantiates the
// .8b (64-bit, VPR64) and .16b (128-bit, VPR128) forms. Separate
// opnode8B/opnode16B parameters allow a different selection operator per
// width; Commutable propagates to isCommutable on both defs.
51 multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size,  bits<5> opcode,
52                                 string asmop, SDPatternOperator opnode8B,
53                                 SDPatternOperator opnode16B,
54                                 bit Commutable = 0>
55 {
56   let isCommutable = Commutable in {
57     def _8B :  NeonI_3VSame<0b0, u, size, opcode,
58                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
59                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
60                [(set (v8i8 VPR64:$Rd),
61                   (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
62                NoItinerary>;
63
// First encoding bit 0b1 selects the 128-bit (Q) register form.
64     def _16B : NeonI_3VSame<0b1, u, size, opcode,
65                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
66                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
67                [(set (v16i8 VPR128:$Rd),
68                   (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
69                NoItinerary>;
70   }
71
72 }
73
// Halfword/word-element forms (.4h/.8h with size=0b01, .2s/.4s with
// size=0b10) of the "three registers, same type" pattern, all selecting
// through a single opnode.
74 multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
75                                   string asmop, SDPatternOperator opnode,
76                                   bit Commutable = 0>
77 {
78   let isCommutable = Commutable in {
79     def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
80               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
81               asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
82               [(set (v4i16 VPR64:$Rd),
83                  (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
84               NoItinerary>;
85
86     def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
87               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
88               asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
89               [(set (v8i16 VPR128:$Rd),
90                  (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
91               NoItinerary>;
92
93     def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
94               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
95               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
96               [(set (v2i32 VPR64:$Rd),
97                  (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
98               NoItinerary>;
99
100     def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
101               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
102               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
103               [(set (v4i32 VPR128:$Rd),
104                  (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
105               NoItinerary>;
106   }
107 }
// Extends the H/S multiclass with the byte forms (.8b/.16b, size=0b00),
// giving the full B/H/S set for a single opnode.
108 multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
109                                   string asmop, SDPatternOperator opnode,
110                                   bit Commutable = 0>
111    : NeonI_3VSame_HS_sizes<u, opcode,  asmop, opnode, Commutable>
112 {
113   let isCommutable = Commutable in {
114     def _8B :  NeonI_3VSame<0b0, u, 0b00, opcode,
115                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
116                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
117                [(set (v8i8 VPR64:$Rd),
118                   (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
119                NoItinerary>;
120
121     def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
122                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
123                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
124                [(set (v16i8 VPR128:$Rd),
125                   (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
126                NoItinerary>;
127   }
128 }
129
// Extends the B/H/S multiclass with the doubleword form (.2d, size=0b11);
// only the 128-bit variant exists for 64-bit elements.
130 multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
131                                    string asmop, SDPatternOperator opnode,
132                                    bit Commutable = 0>
133    : NeonI_3VSame_BHS_sizes<u, opcode,  asmop, opnode, Commutable>
134 {
135   let isCommutable = Commutable in {
136     def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
137               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
138               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
139               [(set (v2i64 VPR128:$Rd),
140                  (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
141               NoItinerary>;
142   }
143 }
144
145 // Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
146 // but Result types can be integer or floating point types.
// Used both for FP arithmetic (ResTy = v2f32/v4f32/v2f64) and FP compares
// (ResTy = integer mask vectors). The `size` bit becomes the high bit of the
// two-bit size field; the low bit distinguishes 32-bit (0) from 64-bit (1)
// elements.
147 multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
148                                  string asmop, SDPatternOperator opnode2S,
149                                  SDPatternOperator opnode4S,
150                                  SDPatternOperator opnode2D,
151                                  ValueType ResTy2S, ValueType ResTy4S,
152                                  ValueType ResTy2D, bit Commutable = 0>
153 {
154   let isCommutable = Commutable in {
155     def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
156               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
157               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
158               [(set (ResTy2S VPR64:$Rd),
159                  (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
160               NoItinerary>;
161
162     def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
163               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
164               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
165               [(set (ResTy4S VPR128:$Rd),
166                  (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
167               NoItinerary>;
168
169     def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
170               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
171               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
172               [(set (ResTy2D VPR128:$Rd),
173                  (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
174                NoItinerary>;
175   }
176 }
177
178 //===----------------------------------------------------------------------===//
179 // Instruction Definitions
180 //===----------------------------------------------------------------------===//
181
182 // Vector Arithmetic Instructions
183
184 // Vector Add (Integer and Floating-Point)
185
// Trailing `1`/`0` argument is the Commutable flag of the multiclasses above.
186 defm ADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
187 defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd,
188                                      v2f32, v4f32, v2f64, 1>;
189
190 // Vector Sub (Integer and Floating-Point)
191
// SUB/FSUB share ADD/FADD's opcodes and are distinguished by the u/size bits.
192 defm SUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
193 defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub,
194                                      v2f32, v4f32, v2f64, 0>;
195
196 // Vector Multiply (Integer and Floating-Point)
197
// Integer MUL has no .2d form, hence the BHS (not BHSD) multiclass.
198 defm MULvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
199 defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul,
200                                      v2f32, v4f32, v2f64, 1>;
201
202 // Vector Multiply (Polynomial)
203
// Polynomial multiply exists only for byte elements; selected via the
// ARM-shared vmulp intrinsic for both widths.
204 defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
205                                     int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
206
207 // Vector Multiply-accumulate and Multiply-subtract (Integer)
208
209 // class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and
210 // two operands constraints.
// Adds an extra $src input tied to $Rd ("$src = $Rd"), for accumulating
// instructions (MLA/MLS, FMLA/FMLS, BSL/BIT/BIF, UABA/SABA) whose
// destination is also read.
211 class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
212   RegisterClass VPRC, ValueType OpTy, bit q, bit u, bits<2> size, bits<5> opcode,
213   SDPatternOperator opnode>
214   : NeonI_3VSame<q, u, size, opcode,
215     (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
216     asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
217     [(set (OpTy VPRC:$Rd),
218        (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
219     NoItinerary> {
220   let Constraints = "$src = $Rd";
221 }
222
// Accumulating multiply fragments: Ra +/- (Rn * Rm).
223 def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
224                        (add node:$Ra, (mul node:$Rn, node:$Rm))>;
225
226 def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
227                        (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
228
229
// MLA: Rd += Rn * Mm per element; $src is tied to $Rd by the
// Constraint_impl base class. No .2d form exists (matches integer MUL).
230 def MLAvvv_8B:  NeonI_3VSame_Constraint_impl<"mla", ".8b",  VPR64,  v8i8,
231                                              0b0, 0b0, 0b00, 0b10010, Neon_mla>;
232 def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
233                                              0b1, 0b0, 0b00, 0b10010, Neon_mla>;
234 def MLAvvv_4H:  NeonI_3VSame_Constraint_impl<"mla", ".4h",  VPR64,  v4i16,
235                                              0b0, 0b0, 0b01, 0b10010, Neon_mla>;
236 def MLAvvv_8H:  NeonI_3VSame_Constraint_impl<"mla", ".8h",  VPR128, v8i16,
237                                              0b1, 0b0, 0b01, 0b10010, Neon_mla>;
238 def MLAvvv_2S:  NeonI_3VSame_Constraint_impl<"mla", ".2s",  VPR64,  v2i32,
239                                              0b0, 0b0, 0b10, 0b10010, Neon_mla>;
240 def MLAvvv_4S:  NeonI_3VSame_Constraint_impl<"mla", ".4s",  VPR128, v4i32,
241                                              0b1, 0b0, 0b10, 0b10010, Neon_mla>;
242
// MLS: Rd -= Rn * Rm per element; same encodings as MLA with u = 1.
243 def MLSvvv_8B:  NeonI_3VSame_Constraint_impl<"mls", ".8b",  VPR64,  v8i8,
244                                              0b0, 0b1, 0b00, 0b10010, Neon_mls>;
245 def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
246                                              0b1, 0b1, 0b00, 0b10010, Neon_mls>;
247 def MLSvvv_4H:  NeonI_3VSame_Constraint_impl<"mls", ".4h",  VPR64,  v4i16,
248                                              0b0, 0b1, 0b01, 0b10010, Neon_mls>;
249 def MLSvvv_8H:  NeonI_3VSame_Constraint_impl<"mls", ".8h",  VPR128, v8i16,
250                                              0b1, 0b1, 0b01, 0b10010, Neon_mls>;
251 def MLSvvv_2S:  NeonI_3VSame_Constraint_impl<"mls", ".2s",  VPR64,  v2i32,
252                                              0b0, 0b1, 0b10, 0b10010, Neon_mls>;
253 def MLSvvv_4S:  NeonI_3VSame_Constraint_impl<"mls", ".4s",  VPR128, v4i32,
254                                              0b1, 0b1, 0b10, 0b10010, Neon_mls>;
255
256 // Vector Multiply-accumulate and Multiply-subtract (Floating Point)

// Unfused FP forms: separate fmul then fadd/fsub. Only matched when fused
// MAC is enabled (see the UseFusedMAC predicate on the defs below).
258 def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
259                         (fadd node:$Ra, (fmul node:$Rn, node:$Rm))>;
260
261 def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
262                         (fsub node:$Ra, (fmul node:$Rn, node:$Rm))>;
263
// Select separate fmul+fadd/fsub into FMLA/FMLS only when fusing is allowed.
// NOTE(review): FMLS size fields are {10,11} vs FMLA's {00,01} — the high
// size bit appears to carry the subtract sense; confirm against the A64
// encoding tables.
264 let Predicates = [HasNEON, UseFusedMAC] in {
265 def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s",  VPR64,  v2f32,
266                                              0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
267 def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s",  VPR128, v4f32,
268                                              0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
269 def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d",  VPR128, v2f64,
270                                              0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
271
272 def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s",  VPR64,  v2f32,
273                                               0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
274 def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s",  VPR128, v4f32,
275                                              0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
276 def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d",  VPR128, v2f64,
277                                              0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
278 }
279
280 // We're also allowed to match the fma instruction regardless of compile
281 // options.
// Explicit ISD::FMA nodes (e.g. from @llvm.fma) always select to FMLA/FMLS;
// note the operand rotation: fma(Rn, Rm, Ra) -> FMLA(Ra, Rn, Rm), since the
// instruction's first operand is the tied accumulator.
282 def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
283           (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
284 def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
285           (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
286 def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
287           (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
288
// fma with a negated multiplicand selects the subtract form.
289 def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
290           (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
291 def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
292           (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
293 def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
294           (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
295
296 // Vector Divide (Floating-Point)
297
298 defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv,
299                                      v2f32, v4f32, v2f64, 0>;
300
301 // Vector Bitwise Operations
302
303 // Vector Bitwise AND
304
// Bitwise ops are element-size agnostic; defined on byte vectors only, with
// the other element types handled by Neon_bitwise2V_patterns further down.
305 defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
306
307 // Vector Bitwise Exclusive OR
308
309 defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
310
311 // Vector Bitwise OR
312
313 defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
314
315 // ORR disassembled as MOV if Vn==Vm
316
317 // Vector Move - register
318 // Alias for ORR if Vn=Vm and it is the preferred syntax
// Assembler-only aliases: "mov Vd, Vn" parses to ORR with both source
// registers equal.
319 def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
320                     (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn)>;
321 def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
322                     (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn)>;
323
// Matches a NEON_MOVIMM whose decoded modified-immediate is an all-ones
// byte splat (element size 8, value 0xff) — i.e. a vector of all set bits.
// Used below to express bitwise NOT as xor-with-all-ones.
324 def Neon_immAllOnes: PatLeaf<(Neon_movi (i32 timm), (i32 imm)), [{
325   ConstantSDNode *ImmConstVal = cast<ConstantSDNode>(N->getOperand(0));
326   ConstantSDNode *OpCmodeConstVal = cast<ConstantSDNode>(N->getOperand(1));
327   unsigned EltBits;
328   uint64_t EltVal = A64Imms::decodeNeonModImm(ImmConstVal->getZExtValue(),
329     OpCmodeConstVal->getZExtValue(), EltBits);
330   return (EltBits == 8 && EltVal == 0xff);
331 }]>;
332
333
// NOT expressed as xor with an all-ones immediate vector, at each width.
334 def Neon_not8B  : PatFrag<(ops node:$in),
335                           (xor node:$in, (bitconvert (v8i8 Neon_immAllOnes)))>;
336 def Neon_not16B : PatFrag<(ops node:$in),
337                           (xor node:$in, (bitconvert (v16i8 Neon_immAllOnes)))>;
338
// ORN: Rn | ~Rm.
339 def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
340                          (or node:$Rn, (Neon_not8B node:$Rm))>;
341
342 def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
343                           (or node:$Rn, (Neon_not16B node:$Rm))>;
344
// BIC: Rn & ~Rm.
345 def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
346                          (and node:$Rn, (Neon_not8B node:$Rm))>;
347
348 def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
349                           (and node:$Rn, (Neon_not16B node:$Rm))>;
350
351
352 // Vector Bitwise OR NOT - register
353
// Not commutable: the second operand is the inverted one.
354 defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
355                                    Neon_orn8B, Neon_orn16B, 0>;
356
357 // Vector Bitwise Bit Clear (AND NOT) - register
358
359 defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
360                                    Neon_bic8B, Neon_bic16B, 0>;
361
// The bitwise instruction defs above only cover v8i8/v16i8; this multiclass
// adds selection patterns for the remaining element types (16/32/64-bit),
// since bitwise ops are element-size agnostic.
362 multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
363                                    SDPatternOperator opnode16B,
364                                    Instruction INST8B,
365                                    Instruction INST16B> {
366   def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
367             (INST8B VPR64:$Rn, VPR64:$Rm)>;
368   def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
369             (INST8B VPR64:$Rn, VPR64:$Rm)>;
370   def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
371             (INST8B VPR64:$Rn, VPR64:$Rm)>;
372   def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
373             (INST16B VPR128:$Rn, VPR128:$Rm)>;
374   def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
375             (INST16B VPR128:$Rn, VPR128:$Rm)>;
376   def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
377             (INST16B VPR128:$Rn, VPR128:$Rm)>;
378 }
379
380 // Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
381 defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
382 defm : Neon_bitwise2V_patterns<or,  or,  ORRvvv_8B, ORRvvv_16B>;
383 defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
384 defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
385 defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
386
387 //   Vector Bitwise Select
// BSL: Rd = (Rd & Rn) | (~Rd & Rm); Rd doubles as the mask, hence the
// tied-operand Constraint_impl base.
388 def BSLvvv_8B  : NeonI_3VSame_Constraint_impl<"bsl", ".8b",  VPR64, v8i8,
389                                               0b0, 0b1, 0b01, 0b00011, Neon_bsl>;
390
391 def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
392                                               0b1, 0b1, 0b01, 0b00011, Neon_bsl>;
393
// Extra selection patterns for the three-operand bitwise-select
// instructions. Three groups: (1) NEON_BSL nodes at the element types the
// instruction defs don't cover, (2) the open-coded (or (and ...) (and ...))
// select idiom, (3) the ARM-shared vbsl intrinsic at every vector type.
394 multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
395                                    Instruction INST8B,
396                                    Instruction INST16B> {
397   // Disassociate type from instruction definition
398   def : Pat<(v2i32 (opnode VPR64:$src,VPR64:$Rn, VPR64:$Rm)),
399             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
400   def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
401             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
402   def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
403             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
404   def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
405             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
406   def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
407             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
408   def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
409             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
410
411   // Allow to match BSL instruction pattern with non-constant operand
// (Rn & Rd) | (Rm & ~Rd) is exactly the BSL semantics with Rd as mask.
412   def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
413                     (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
414           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
415   def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
416                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
417           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
418   def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
419                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
420           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
421   def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
422                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
423           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
424   def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
425                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
426           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
427   def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
428                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
429           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
430   def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
431                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
432           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
433   def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
434                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
435           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
436
437   // Allow to match llvm.arm.* intrinsics.
438   def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
439                     (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
440             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
441   def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
442                     (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
443             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
444   def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
445                     (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
446             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
447   def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
448                     (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
449             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
450   def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
451                     (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
452             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
453   def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
454                     (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
455             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
456   def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
457                     (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
458             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
459   def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
460                     (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
461             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
462   def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
463                     (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
464             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
465   def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
466                     (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
467             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
468   def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
469                     (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
470             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
471 }
472
473 // Additional patterns for bitwise instruction BSL
474 defm: Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>;
475
// Predicate always returns false, so this fragment never matches anything:
// it lets the BIT/BIF defs below reuse NeonI_3VSame_Constraint_impl without
// contributing a selection pattern (those instructions are assembler-only
// here).
476 def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
477                            (Neon_bsl node:$src, node:$Rn, node:$Rm),
478                            [{ (void)N; return false; }]>;
479
480 // Vector Bitwise Insert if True
481
// BIT/BIF use the never-matching Neon_NoBSLop, so no DAG pattern is
// generated — only the instruction encodings and assembly syntax.
482 def BITvvv_8B  : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64,   v8i8,
483                    0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
484 def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
485                    0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
486
487 // Vector Bitwise Insert if False
488
489 def BIFvvv_8B  : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64,  v8i8,
490                                 0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
491 def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
492                                 0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
493
494 // Vector Absolute Difference and Accumulate (Signed, Unsigned)

// Ra + |Rn - Rm|, built from the ARM-shared absolute-difference intrinsics.
496 def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
497                        (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
498 def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
499                        (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
500
501 // Vector Absolute Difference and Accumulate (Unsigned)
// Accumulator $src is tied to $Rd via the Constraint_impl base class.
502 def UABAvvv_8B :  NeonI_3VSame_Constraint_impl<"uaba", ".8b",  VPR64,  v8i8,
503                     0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
504 def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
505                     0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
506 def UABAvvv_4H :  NeonI_3VSame_Constraint_impl<"uaba", ".4h",  VPR64,  v4i16,
507                     0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
508 def UABAvvv_8H :  NeonI_3VSame_Constraint_impl<"uaba", ".8h",  VPR128, v8i16,
509                     0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
510 def UABAvvv_2S :  NeonI_3VSame_Constraint_impl<"uaba", ".2s",  VPR64,  v2i32,
511                     0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
512 def UABAvvv_4S :  NeonI_3VSame_Constraint_impl<"uaba", ".4s",  VPR128, v4i32,
513                     0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
514
515 // Vector Absolute Difference and Accumulate (Signed)
// Same encodings as UABA with u = 0.
516 def SABAvvv_8B :  NeonI_3VSame_Constraint_impl<"saba", ".8b",  VPR64,  v8i8,
517                     0b0, 0b0, 0b00, 0b01111, Neon_saba>;
518 def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
519                     0b1, 0b0, 0b00, 0b01111, Neon_saba>;
520 def SABAvvv_4H :  NeonI_3VSame_Constraint_impl<"saba", ".4h",  VPR64,  v4i16,
521                     0b0, 0b0, 0b01, 0b01111, Neon_saba>;
522 def SABAvvv_8H :  NeonI_3VSame_Constraint_impl<"saba", ".8h",  VPR128, v8i16,
523                     0b1, 0b0, 0b01, 0b01111, Neon_saba>;
524 def SABAvvv_2S :  NeonI_3VSame_Constraint_impl<"saba", ".2s",  VPR64,  v2i32,
525                     0b0, 0b0, 0b10, 0b01111, Neon_saba>;
526 def SABAvvv_4S :  NeonI_3VSame_Constraint_impl<"saba", ".4s",  VPR128, v4i32,
527                     0b1, 0b0, 0b10, 0b01111, Neon_saba>;
528
529
530 // Vector Absolute Difference (Signed, Unsigned)
531 defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
532 defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
533
534 // Vector Absolute Difference (Floating Point)
// NOTE(review): int_arm_neon_vabds is the overloaded ARM abs-diff intrinsic,
// used here with FP vector types — presumably matching ARM's VABD.F usage;
// confirm against the intrinsic definition.
535 defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
536                                     int_arm_neon_vabds, int_arm_neon_vabds,
537                                     int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
538
539 // Vector Reciprocal Step (Floating Point)
540 defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
541                                        int_arm_neon_vrecps, int_arm_neon_vrecps,
542                                        int_arm_neon_vrecps,
543                                        v2f32, v4f32, v2f64, 0>;
544
545 // Vector Reciprocal Square Root Step (Floating Point)
546 defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
547                                         int_arm_neon_vrsqrts,
548                                         int_arm_neon_vrsqrts,
549                                         int_arm_neon_vrsqrts,
550                                         v2f32, v4f32, v2f64, 0>;
551
552 // Vector Comparisons

// Bind each integer compare condition to the generic NEON_CMP node so the
// compare instructions below can each match a single condition.
554 def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
555                         (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
556 def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
557                          (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
558 def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
559                         (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
560 def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
561                         (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
562 def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
563                         (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
564
565 // NeonI_compare_aliases class: swaps register operands to implement
566 // comparison aliases, e.g., CMLE is alias for CMGE with operands reversed.
// NOTE(review): the trailing 0b0 appears to be the NeonInstAlias emit bit,
// keeping these parse-only (never used for printing) — confirm against the
// NeonInstAlias definition.
567 class NeonI_compare_aliases<string asmop, string asmlane,
568                             Instruction inst, RegisterClass VPRC>
569   : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
570                     ", $Rm" # asmlane,
571                   (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
572
573 // Vector Comparisons (Integer)
574
575 // Vector Compare Mask Equal (Integer)
// Fix: the previous outer `let isCommutable = 1 in { ... }` was dead code —
// TableGen's innermost `let` wins, so the multiclass's own
// `let isCommutable = Commutable` (with Commutable = 0) silently overrode
// it and CMEQ was never marked commutable. Pass Commutable = 1 to the
// multiclass instead, matching how ADDvvv/MULvvv request commutability.
defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 1>;
579
580 // Vector Compare Mask Higher or Same (Unsigned Integer)
// CMHS/CMGE share opcode 0b00111 and CMHI/CMGT share 0b00110; the u bit
// selects unsigned (1) vs signed (0). CMTST shares 0b10001 with CMEQ above,
// distinguished by u = 0.
581 defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
582
583 // Vector Compare Mask Greater Than or Equal (Integer)
584 defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
585
586 // Vector Compare Mask Higher (Unsigned Integer)
587 defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
588
589 // Vector Compare Mask Greater Than (Integer)
590 defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
591
592 // Vector Compare Mask Bitwise Test (Integer)
593 defm CMTSTvvv:  NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
594
595 // Vector Compare Mask Less or Same (Unsigned Integer)
596 // CMLS is alias for CMHS with operands reversed.
// All defs below are assembler aliases built on NeonI_compare_aliases,
// which swaps $Rn/$Rm of the underlying instruction.
597 def CMLSvvv_8B  : NeonI_compare_aliases<"cmls", ".8b",  CMHSvvv_8B,  VPR64>;
598 def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
599 def CMLSvvv_4H  : NeonI_compare_aliases<"cmls", ".4h",  CMHSvvv_4H,  VPR64>;
600 def CMLSvvv_8H  : NeonI_compare_aliases<"cmls", ".8h",  CMHSvvv_8H,  VPR128>;
601 def CMLSvvv_2S  : NeonI_compare_aliases<"cmls", ".2s",  CMHSvvv_2S,  VPR64>;
602 def CMLSvvv_4S  : NeonI_compare_aliases<"cmls", ".4s",  CMHSvvv_4S,  VPR128>;
603 def CMLSvvv_2D  : NeonI_compare_aliases<"cmls", ".2d",  CMHSvvv_2D,  VPR128>;
604
605 // Vector Compare Mask Less Than or Equal (Integer)
606 // CMLE is alias for CMGE with operands reversed.
607 def CMLEvvv_8B  : NeonI_compare_aliases<"cmle", ".8b",  CMGEvvv_8B,  VPR64>;
608 def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
609 def CMLEvvv_4H  : NeonI_compare_aliases<"cmle", ".4h",  CMGEvvv_4H,  VPR64>;
610 def CMLEvvv_8H  : NeonI_compare_aliases<"cmle", ".8h",  CMGEvvv_8H,  VPR128>;
611 def CMLEvvv_2S  : NeonI_compare_aliases<"cmle", ".2s",  CMGEvvv_2S,  VPR64>;
612 def CMLEvvv_4S  : NeonI_compare_aliases<"cmle", ".4s",  CMGEvvv_4S,  VPR128>;
613 def CMLEvvv_2D  : NeonI_compare_aliases<"cmle", ".2d",  CMGEvvv_2D,  VPR128>;
614
615 // Vector Compare Mask Lower (Unsigned Integer)
616 // CMLO is alias for CMHI with operands reversed.
617 def CMLOvvv_8B  : NeonI_compare_aliases<"cmlo", ".8b",  CMHIvvv_8B,  VPR64>;
618 def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
619 def CMLOvvv_4H  : NeonI_compare_aliases<"cmlo", ".4h",  CMHIvvv_4H,  VPR64>;
620 def CMLOvvv_8H  : NeonI_compare_aliases<"cmlo", ".8h",  CMHIvvv_8H,  VPR128>;
621 def CMLOvvv_2S  : NeonI_compare_aliases<"cmlo", ".2s",  CMHIvvv_2S,  VPR64>;
622 def CMLOvvv_4S  : NeonI_compare_aliases<"cmlo", ".4s",  CMHIvvv_4S,  VPR128>;
623 def CMLOvvv_2D  : NeonI_compare_aliases<"cmlo", ".2d",  CMHIvvv_2D,  VPR128>;
624
625 // Vector Compare Mask Less Than (Integer)
626 // CMLT is alias for CMGT with operands reversed.
627 def CMLTvvv_8B  : NeonI_compare_aliases<"cmlt", ".8b",  CMGTvvv_8B,  VPR64>;
628 def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
629 def CMLTvvv_4H  : NeonI_compare_aliases<"cmlt", ".4h",  CMGTvvv_4H,  VPR64>;
630 def CMLTvvv_8H  : NeonI_compare_aliases<"cmlt", ".8h",  CMGTvvv_8H,  VPR128>;
631 def CMLTvvv_2S  : NeonI_compare_aliases<"cmlt", ".2s",  CMGTvvv_2S,  VPR64>;
632 def CMLTvvv_4S  : NeonI_compare_aliases<"cmlt", ".4s",  CMGTvvv_4S,  VPR128>;
633 def CMLTvvv_2D  : NeonI_compare_aliases<"cmlt", ".2d",  CMGTvvv_2D,  VPR128>;
634
635
636 def neon_uimm0_asmoperand : AsmOperandClass
637 {
638   let Name = "UImm0";
639   let PredicateMethod = "isUImm<0>";
640   let RenderMethod = "addImmOperands";
641 }
642
643 def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
644   let ParserMatchClass = neon_uimm0_asmoperand;
645   let PrintMethod = "printNeonUImm0Operand";
646
647 }
648
// Integer compare-against-zero, one instruction per vector arrangement
// (8B/16B/4H/8H/2S/4S/2D).  Every lane of $Rn is compared with the literal
// immediate #0 (neon_uimm0) under condition code CC and an all-ones /
// all-zeros mask is written per lane to $Rd.  The first NeonI_2VMisc
// argument selects the register width (0b0 = 64-bit VPR64, 0b1 = 128-bit
// VPR128); note the 2D arrangement exists only in the 128-bit form.
multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
{
  def _8B :  NeonI_2VMisc<0b0, u, 0b00, opcode,
             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
             asmop # "\t$Rd.8b, $Rn.8b, $Imm",
             [(set (v8i8 VPR64:$Rd),
                (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
             NoItinerary>;

  def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
             asmop # "\t$Rd.16b, $Rn.16b, $Imm",
             [(set (v16i8 VPR128:$Rd),
                (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
             NoItinerary>;

  def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
            (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.4h, $Rn.4h, $Imm",
            [(set (v4i16 VPR64:$Rd),
               (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.8h, $Rn.8h, $Imm",
            [(set (v8i16 VPR128:$Rd),
               (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
            (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.2s, $Rn.2s, $Imm",
            [(set (v2i32 VPR64:$Rd),
               (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.4s, $Rn.4s, $Imm",
            [(set (v4i32 VPR128:$Rd),
               (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.2d, $Rn.2d, $Imm",
            [(set (v2i64 VPR128:$Rd),
               (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;
}

// Vector Compare Mask Equal to Zero (Integer)
defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;

// Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;

// Vector Compare Mask Greater Than Zero (Signed Integer)
defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;

// Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;

// Vector Compare Mask Less Than Zero (Signed Integer)
defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
715
// Vector Comparisons (Floating Point)
// These produce integer lane masks (v2i32/v4i32/v2i64), not FP results.

// Vector Compare Mask Equal (Floating Point)
let isCommutable =1 in {
defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
                                      Neon_cmeq, Neon_cmeq,
                                      v2i32, v4i32, v2i64, 0>;
}

// Vector Compare Mask Greater Than Or Equal (Floating Point)
defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
                                      Neon_cmge, Neon_cmge,
                                      v2i32, v4i32, v2i64, 0>;

// Vector Compare Mask Greater Than (Floating Point)
defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
                                      Neon_cmgt, Neon_cmgt,
                                      v2i32, v4i32, v2i64, 0>;

// Vector Compare Mask Less Than Or Equal (Floating Point)
// FCMLE is alias for FCMGE with operands reversed.
def FCMLEvvv_2S  : NeonI_compare_aliases<"fcmle", ".2s",  FCMGEvvv_2S,  VPR64>;
def FCMLEvvv_4S  : NeonI_compare_aliases<"fcmle", ".4s",  FCMGEvvv_4S,  VPR128>;
def FCMLEvvv_2D  : NeonI_compare_aliases<"fcmle", ".2d",  FCMGEvvv_2D,  VPR128>;

// Vector Compare Mask Less Than (Floating Point)
// FCMLT is alias for FCMGT with operands reversed.
def FCMLTvvv_2S  : NeonI_compare_aliases<"fcmlt", ".2s",  FCMGTvvv_2S,  VPR64>;
def FCMLTvvv_4S  : NeonI_compare_aliases<"fcmlt", ".4s",  FCMGTvvv_4S,  VPR128>;
def FCMLTvvv_2D  : NeonI_compare_aliases<"fcmlt", ".2d",  FCMGTvvv_2D,  VPR128>;


// FP compare-against-zero.  The fpz32 operand carries the literal "#0.0"
// immediate for all three arrangements (the immediate is always zero, so a
// single-precision zero operand is used even for the 2D form).
multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
                              string asmop, CondCode CC>
{
  def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
            (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
            asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
            [(set (v2i32 VPR64:$Rd),
               (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))],
            NoItinerary>;

  def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
            asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
            [(set (v4i32 VPR128:$Rd),
               (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
            NoItinerary>;

  def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
            asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
            [(set (v2i64 VPR128:$Rd),
               (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
            NoItinerary>;
}

// Vector Compare Mask Equal to Zero (Floating Point)
defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;

// Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;

// Vector Compare Mask Greater Than Zero (Floating Point)
defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;

// Vector Compare Mask Less Than or Equal To Zero (Floating Point)
defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;

// Vector Compare Mask Less Than Zero (Floating Point)
defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;

// Vector Absolute Comparisons (Floating Point)

// Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
                                      int_arm_neon_vacged, int_arm_neon_vacgeq,
                                      int_aarch64_neon_vacgeq,
                                      v2i32, v4i32, v2i64, 0>;

// Vector Absolute Compare Mask Greater Than (Floating Point)
defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
                                      int_arm_neon_vacgtd, int_arm_neon_vacgtq,
                                      int_aarch64_neon_vacgtq,
                                      v2i32, v4i32, v2i64, 0>;

// Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
// FACLE is alias for FACGE with operands reversed.
def FACLEvvv_2S  : NeonI_compare_aliases<"facle", ".2s",  FACGEvvv_2S,  VPR64>;
def FACLEvvv_4S  : NeonI_compare_aliases<"facle", ".4s",  FACGEvvv_4S,  VPR128>;
def FACLEvvv_2D  : NeonI_compare_aliases<"facle", ".2d",  FACGEvvv_2D,  VPR128>;

// Vector Absolute Compare Mask Less Than (Floating Point)
// FACLT is alias for FACGT with operands reversed.
def FACLTvvv_2S  : NeonI_compare_aliases<"faclt", ".2s",  FACGTvvv_2S,  VPR64>;
def FACLTvvv_4S  : NeonI_compare_aliases<"faclt", ".4s",  FACGTvvv_4S,  VPR128>;
def FACLTvvv_2D  : NeonI_compare_aliases<"faclt", ".2d",  FACGTvvv_2D,  VPR128>;
813
// Three-register same-size arithmetic.  The trailing flag on each defm is
// the isCommutable bit (1 = commutative).

// Vector halving add (Integer Signed, Unsigned)
defm SHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
                                        int_arm_neon_vhadds, 1>;
defm UHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
                                        int_arm_neon_vhaddu, 1>;

// Vector halving sub (Integer Signed, Unsigned)
defm SHSUBvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
                                        int_arm_neon_vhsubs, 0>;
defm UHSUBvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
                                        int_arm_neon_vhsubu, 0>;

// Vector rounding halving add (Integer Signed, Unsigned)
defm SRHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
                                         int_arm_neon_vrhadds, 1>;
defm URHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
                                         int_arm_neon_vrhaddu, 1>;

// Vector Saturating add (Integer Signed, Unsigned)
defm SQADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
                   int_arm_neon_vqadds, 1>;
defm UQADDvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
                   int_arm_neon_vqaddu, 1>;

// Vector Saturating sub (Integer Signed, Unsigned)
defm SQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
                   int_arm_neon_vqsubs, 1>;
defm UQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
                   int_arm_neon_vqsubu, 1>;

// Vector Shift Left (Signed and Unsigned Integer)
defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
                 int_arm_neon_vshifts, 1>;
defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
                 int_arm_neon_vshiftu, 1>;

// Vector Saturating Shift Left (Signed and Unsigned Integer)
defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
                  int_arm_neon_vqshifts, 1>;
defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
                  int_arm_neon_vqshiftu, 1>;

// Vector Rounding Shift Left (Signed and Unsigned Integer)
defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
                  int_arm_neon_vrshifts, 1>;
defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
                  int_arm_neon_vrshiftu, 1>;

// Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
                   int_arm_neon_vqrshifts, 1>;
defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
                   int_arm_neon_vqrshiftu, 1>;

// Vector Maximum (Signed and Unsigned Integer)
defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;

// Vector Minimum (Signed and Unsigned Integer)
defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;

// Vector Maximum (Floating Point)
defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
                                     int_arm_neon_vmaxs, int_arm_neon_vmaxs,
                                     int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;

// Vector Minimum (Floating Point)
defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
                                     int_arm_neon_vmins, int_arm_neon_vmins,
                                     int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;

// Vector maxNum (Floating Point) - prefer a number over a quiet NaN
defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
                                       int_aarch64_neon_vmaxnm,
                                       int_aarch64_neon_vmaxnm,
                                       int_aarch64_neon_vmaxnm,
                                       v2f32, v4f32, v2f64, 1>;

// Vector minNum (Floating Point) - prefer a number over a quiet NaN
defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
                                       int_aarch64_neon_vminnm,
                                       int_aarch64_neon_vminnm,
                                       int_aarch64_neon_vminnm,
                                       v2f32, v4f32, v2f64, 1>;

// Vector Maximum Pairwise (Signed and Unsigned Integer)
defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;

// Vector Minimum Pairwise (Signed and Unsigned Integer)
defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;

// Vector Maximum Pairwise (Floating Point)
defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
                                     int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
                                     int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;

// Vector Minimum Pairwise (Floating Point)
defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
                                     int_arm_neon_vpmins, int_arm_neon_vpmins,
                                     int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;

// Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
                                       int_aarch64_neon_vpmaxnm,
                                       int_aarch64_neon_vpmaxnm,
                                       int_aarch64_neon_vpmaxnm,
                                       v2f32, v4f32, v2f64, 1>;

// Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
                                       int_aarch64_neon_vpminnm,
                                       int_aarch64_neon_vpminnm,
                                       int_aarch64_neon_vpminnm,
                                       v2f32, v4f32, v2f64, 1>;

// Vector Addition Pairwise (Integer)
defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;

// Vector Addition Pairwise (Floating Point)
defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
                                       int_arm_neon_vpadd,
                                       int_arm_neon_vpadd,
                                       int_arm_neon_vpadd,
                                       v2f32, v4f32, v2f64, 1>;

// Vector Saturating Doubling Multiply High
defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
                    int_arm_neon_vqdmulh, 1>;

// Vector Saturating Rounding Doubling Multiply High
defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
                     int_arm_neon_vqrdmulh, 1>;

// Vector Multiply Extended (Floating Point)
defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
                                      int_aarch64_neon_vmulx,
                                      int_aarch64_neon_vmulx,
                                      int_aarch64_neon_vmulx,
                                      v2f32, v4f32, v2f64, 1>;
956
// Vector Immediate Instructions

// Per-shift-kind assembler operand class for the modified-immediate shift
// operand ("LSL #n" / "MSL #n").  The parser/printer hooks are synthesized
// from PREFIX by string concatenation.
multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
{
  def _asmoperand : AsmOperandClass
    {
      let Name = "NeonMovImmShift" # PREFIX;
      let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
      let PredicateMethod = "isNeonMovImmShift" # PREFIX;
    }
}

// Definition of vector immediates shift operands

// The selectable use-cases extract the shift operation
// information from the OpCmode fields encoded in the immediate.
def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
  uint64_t OpCmode = N->getZExtValue();
  unsigned ShiftImm;
  unsigned ShiftOnesIn;
  unsigned HasShift =
    A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
  if (!HasShift) return SDValue();
  return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
}]>;

// Vector immediates shift operands which accept LSL and MSL
// shift operators with shift value in the range of 0, 8, 16, 24 (LSL),
// or 0, 8 (LSLH) or 8, 16 (MSL).
defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
// LSLH restricts shift amount to  0, 8 out of 0, 8, 16, 24
defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;

// i32 shift operand tied to the matching asmoperand class above.  The
// ImmLeaf predicate (pred) decides whether an OpCmode value belongs to this
// shift kind; ISHALF is spliced into the print/decode template arguments.
multiclass neon_mov_imm_shift_operands<string PREFIX,
                                       string HALF, string ISHALF, code pred>
{
   def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
    {
      let PrintMethod =
        "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
      let DecoderMethod =
        "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
      let ParserMatchClass =
        !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
    }
}

// LSL: a valid mod-shift immediate with zeros shifted in.
defm neon_mov_imm_LSL  : neon_mov_imm_shift_operands<"LSL", "", "false", [{
  unsigned ShiftImm;
  unsigned ShiftOnesIn;
  unsigned HasShift =
    A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
  return (HasShift && !ShiftOnesIn);
}]>;

// MSL: a valid mod-shift immediate with ones shifted in.
defm neon_mov_imm_MSL  : neon_mov_imm_shift_operands<"MSL", "", "false", [{
  unsigned ShiftImm;
  unsigned ShiftOnesIn;
  unsigned HasShift =
    A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
  return (HasShift && ShiftOnesIn);
}]>;

// LSLH: same predicate as LSL, but parses/prints via the LSLH asmoperand
// (shift amount restricted to 0 or 8).
defm neon_mov_imm_LSLH  : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
  unsigned ShiftImm;
  unsigned ShiftOnesIn;
  unsigned HasShift =
    A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
  return (HasShift && !ShiftOnesIn);
}]>;
1028
// Assembler operand class for a plain unsigned 8-bit immediate.
def neon_uimm8_asmoperand : AsmOperandClass {
  let Name = "UImm8";
  let RenderMethod = "addImmOperands";
  let PredicateMethod = "isUImm<8>";
}

// Any i32 is accepted by the ImmLeaf at selection time; range checking is
// the job of the asmoperand predicate above.
def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
  let PrintMethod = "printNeonUImm8Operand";
  let ParserMatchClass = neon_uimm8_asmoperand;
}
1040
// Assembler operand class for the 64-bit bytemask immediate form of MOVI.
def neon_uimm64_mask_asmoperand : AsmOperandClass
{
  let Name = "NeonUImm64Mask";
  let PredicateMethod = "isNeonUImm64Mask";
  let RenderMethod = "addNeonUImm64MaskOperands";
}

// MCOperand for 64-bit bytemask with each byte having only the
// value 0x00 and 0xff is encoded as an unsigned 8-bit value
def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
  let ParserMatchClass = neon_uimm64_mask_asmoperand;
  let PrintMethod = "printNeonUImm64MaskOperand";
}
1054
// Move-immediate with LSL shift (zeros shifted in), per-word (2S/4S) and
// per-halfword (4H/8H) forms.  The cmode field encodes the element size and
// the shift amount: word forms store the 2-bit shift selector in cmode<2:1>,
// halfword forms store the 1-bit selector in cmode<1>.
multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
                                   SDPatternOperator opnode>
{
    // shift zeros, per word
    def _2S  : NeonI_1VModImm<0b0, op,
                              (outs VPR64:$Rd),
                              (ins neon_uimm8:$Imm,
                                neon_mov_imm_LSL_operand:$Simm),
                              !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
                              [(set (v2i32 VPR64:$Rd),
                                 (v2i32 (opnode (timm:$Imm),
                                   (neon_mov_imm_LSL_operand:$Simm))))],
                              NoItinerary> {
       bits<2> Simm;
       let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
     }

    def _4S  : NeonI_1VModImm<0b1, op,
                              (outs VPR128:$Rd),
                              (ins neon_uimm8:$Imm,
                                neon_mov_imm_LSL_operand:$Simm),
                              !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
                              [(set (v4i32 VPR128:$Rd),
                                 (v4i32 (opnode (timm:$Imm),
                                   (neon_mov_imm_LSL_operand:$Simm))))],
                              NoItinerary> {
      bits<2> Simm;
      let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
    }

    // shift zeros, per halfword
    def _4H  : NeonI_1VModImm<0b0, op,
                              (outs VPR64:$Rd),
                              (ins neon_uimm8:$Imm,
                                neon_mov_imm_LSLH_operand:$Simm),
                              !strconcat(asmop, " $Rd.4h, $Imm$Simm"),
                              [(set (v4i16 VPR64:$Rd),
                                 (v4i16 (opnode (timm:$Imm),
                                   (neon_mov_imm_LSLH_operand:$Simm))))],
                              NoItinerary> {
      bit  Simm;
      let cmode = {0b1, 0b0, Simm, 0b0};
    }

    def _8H  : NeonI_1VModImm<0b1, op,
                              (outs VPR128:$Rd),
                              (ins neon_uimm8:$Imm,
                                neon_mov_imm_LSLH_operand:$Simm),
                              !strconcat(asmop, " $Rd.8h, $Imm$Simm"),
                              [(set (v8i16 VPR128:$Rd),
                                 (v8i16 (opnode (timm:$Imm),
                                   (neon_mov_imm_LSLH_operand:$Simm))))],
                              NoItinerary> {
      bit Simm;
      let cmode = {0b1, 0b0, Simm, 0b0};
     }
}
1112
// Read-modify-write immediate ops (BIC/ORR immediate): $Rd is tied to $src
// via the Constraints below, and the select pattern combines $src with the
// expanded immediate (neonopnode of Imm/Simm, bitconverted to the operand
// type) through opnode.  cmode<0> is 1 here, distinguishing these from the
// plain move-immediate forms.
multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
                                                   SDPatternOperator opnode,
                                                   SDPatternOperator neonopnode>
{
  let Constraints = "$src = $Rd" in {
    // shift zeros, per word
    def _2S  : NeonI_1VModImm<0b0, op,
                 (outs VPR64:$Rd),
                 (ins VPR64:$src, neon_uimm8:$Imm,
                   neon_mov_imm_LSL_operand:$Simm),
                 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
                 [(set (v2i32 VPR64:$Rd),
                    (v2i32 (opnode (v2i32 VPR64:$src),
                      (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
                        neon_mov_imm_LSL_operand:$Simm)))))))],
                 NoItinerary> {
      bits<2> Simm;
      let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
    }

    def _4S  : NeonI_1VModImm<0b1, op,
                 (outs VPR128:$Rd),
                 (ins VPR128:$src, neon_uimm8:$Imm,
                   neon_mov_imm_LSL_operand:$Simm),
                 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
                 [(set (v4i32 VPR128:$Rd),
                    (v4i32 (opnode (v4i32 VPR128:$src),
                      (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
                        neon_mov_imm_LSL_operand:$Simm)))))))],
                 NoItinerary> {
      bits<2> Simm;
      let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
    }

    // shift zeros, per halfword
    // NOTE(review): the ins lists below use neon_mov_imm_LSLH_operand while
    // the select patterns reference neon_mov_imm_LSL_operand -- confirm this
    // asymmetry is intentional (the _2S/_4S defs above use LSL in both).
    def _4H  : NeonI_1VModImm<0b0, op,
                 (outs VPR64:$Rd),
                 (ins VPR64:$src, neon_uimm8:$Imm,
                   neon_mov_imm_LSLH_operand:$Simm),
                 !strconcat(asmop, " $Rd.4h, $Imm$Simm"),
                 [(set (v4i16 VPR64:$Rd),
                    (v4i16 (opnode (v4i16 VPR64:$src),
                       (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
                          neon_mov_imm_LSL_operand:$Simm)))))))],
                 NoItinerary> {
      bit  Simm;
      let cmode = {0b1, 0b0, Simm, 0b1};
    }

    def _8H  : NeonI_1VModImm<0b1, op,
                 (outs VPR128:$Rd),
                 (ins VPR128:$src, neon_uimm8:$Imm,
                   neon_mov_imm_LSLH_operand:$Simm),
                 !strconcat(asmop, " $Rd.8h, $Imm$Simm"),
                 [(set (v8i16 VPR128:$Rd),
                    (v8i16 (opnode (v8i16 VPR128:$src),
                      (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
                        neon_mov_imm_LSL_operand:$Simm)))))))],
                 NoItinerary> {
      bit Simm;
      let cmode = {0b1, 0b0, Simm, 0b1};
    }
  }
}
1177
// Move-immediate with MSL shift (ones shifted in); word element size only
// (2S/4S).  The single shift-selector bit lands in cmode<0>.
multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
                                   SDPatternOperator opnode>
{
    // shift ones, per word
    def _2S  : NeonI_1VModImm<0b0, op,
                             (outs VPR64:$Rd),
                             (ins neon_uimm8:$Imm,
                               neon_mov_imm_MSL_operand:$Simm),
                             !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
                              [(set (v2i32 VPR64:$Rd),
                                 (v2i32 (opnode (timm:$Imm),
                                   (neon_mov_imm_MSL_operand:$Simm))))],
                             NoItinerary> {
       bit Simm;
       let cmode = {0b1, 0b1, 0b0, Simm};
     }

   def _4S  : NeonI_1VModImm<0b1, op,
                              (outs VPR128:$Rd),
                              (ins neon_uimm8:$Imm,
                                neon_mov_imm_MSL_operand:$Simm),
                              !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
                              [(set (v4i32 VPR128:$Rd),
                                 (v4i32 (opnode (timm:$Imm),
                                   (neon_mov_imm_MSL_operand:$Simm))))],
                              NoItinerary> {
     bit Simm;
     let cmode = {0b1, 0b1, 0b0, Simm};
   }
}

// Vector Move Immediate Shifted
let isReMaterializable = 1 in {
defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
}

// Vector Move Inverted Immediate Shifted
let isReMaterializable = 1 in {
defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
}

// Vector Bitwise Bit Clear (AND NOT) - immediate
// BIC = AND with the inverted (MVNI-expanded) immediate.
let isReMaterializable = 1 in {
defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
                                                         and, Neon_mvni>;
}

// Vector Bitwise OR - immediate

let isReMaterializable = 1 in {
defm ORRvi_lsl   : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
                                                           or, Neon_movi>;
}
1231
// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
// LowerBUILD_VECTOR favors lowering MOVI over MVNI.
// BIC immediate instructions selection requires additional patterns to
// transform Neon_movi operands into BIC immediate operands

def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
  uint64_t OpCmode = N->getZExtValue();
  unsigned ShiftImm;
  unsigned ShiftOnesIn;
  (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
  // LSLH restricts shift amount to  0, 8 which are encoded as 0 and 1
  // Transform encoded shift amount 0 to 1 and 1 to 0.
  return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
}]>;

// Matches an LSLH-style (zeros shifted in) mod-shift immediate and flips
// its shift selector via the XFORM above.
def neon_mov_imm_LSLH_transform_operand
  : ImmLeaf<i32, [{
    unsigned ShiftImm;
    unsigned ShiftOnesIn;
    unsigned HasShift =
      A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
    return (HasShift && !ShiftOnesIn); }],
  neon_mov_imm_LSLH_transform_XFORM>;

// Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
// Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
def : Pat<(v4i16 (and VPR64:$src,
            (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
          (BICvi_lsl_4H VPR64:$src, 0,
            neon_mov_imm_LSLH_transform_operand:$Simm)>;

// Transform (and A, (8h Neon_movi 8h 0xff)) -> BIC 8h (A, 0x00, LSL 8)
// Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
def : Pat<(v8i16 (and VPR128:$src,
            (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
          (BICvi_lsl_8H VPR128:$src, 0,
            neon_mov_imm_LSLH_transform_operand:$Simm)>;


// Folds opnode of a vector with a halfword-pattern immediate (built by
// neonopnode and bitconverted to the operand's own element type) into the
// 4H (64-bit) or 8H (128-bit) immediate instruction.
// NOTE(review): the 64-bit side covers v8i8 and v1i64 but not v2i32 --
// confirm whether the v2i32 case is intentionally omitted.
multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
                                   SDPatternOperator neonopnode,
                                   Instruction INST4H,
                                   Instruction INST8H> {
  def : Pat<(v8i8 (opnode VPR64:$src,
                    (bitconvert(v4i16 (neonopnode timm:$Imm,
                      neon_mov_imm_LSLH_operand:$Simm))))),
            (INST4H VPR64:$src, neon_uimm8:$Imm,
              neon_mov_imm_LSLH_operand:$Simm)>;
  def : Pat<(v1i64 (opnode VPR64:$src,
                  (bitconvert(v4i16 (neonopnode timm:$Imm,
                    neon_mov_imm_LSLH_operand:$Simm))))),
          (INST4H VPR64:$src, neon_uimm8:$Imm,
            neon_mov_imm_LSLH_operand:$Simm)>;

  def : Pat<(v16i8 (opnode VPR128:$src,
                   (bitconvert(v8i16 (neonopnode timm:$Imm,
                     neon_mov_imm_LSLH_operand:$Simm))))),
          (INST8H VPR128:$src, neon_uimm8:$Imm,
            neon_mov_imm_LSLH_operand:$Simm)>;
  def : Pat<(v4i32 (opnode VPR128:$src,
                   (bitconvert(v8i16 (neonopnode timm:$Imm,
                     neon_mov_imm_LSLH_operand:$Simm))))),
          (INST8H VPR128:$src, neon_uimm8:$Imm,
            neon_mov_imm_LSLH_operand:$Simm)>;
  def : Pat<(v2i64 (opnode VPR128:$src,
                   (bitconvert(v8i16 (neonopnode timm:$Imm,
                     neon_mov_imm_LSLH_operand:$Simm))))),
          (INST8H VPR128:$src, neon_uimm8:$Imm,
            neon_mov_imm_LSLH_operand:$Simm)>;
}
1302
// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate.
// MVNI materializes ~imm, so (and A, ~imm) == (BIC A, imm); the DAG node
// to match is therefore 'and', not 'or' (or with mvni would be ORN, and
// selecting it as BIC would be wrong).
defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;

// Additional patterns for Vector Bitwise OR - immediate
defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
1308
1309
// Vector Move Immediate Masked (MSL "ones" shift form).
// Rematerializable: the result depends only on the immediate, so the
// register allocator may recompute it instead of spilling.
let isReMaterializable = 1 in {
defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
}

// Vector Move Inverted Immediate Masked (MVNI, op bit = 1).
let isReMaterializable = 1 in {
defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
}
1319
// Assembly alias accepting the shift-less spelling of the shifted-immediate
// instructions ("<asmop> Vd<lane>, #imm"), with the shift operand
// hard-wired to 0.  The trailing 0b0 disables emitting this alias form.
// NOTE(review): the asm string concatenates " $Rd," with asmlane (e.g.
// ".2s"), producing "$Rd,.2s, $Imm"; " $Rd" # asmlane # ", $Imm" looks
// like the intended spelling -- confirm against the asm matcher.
class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
                                Instruction inst, RegisterClass VPRC>
  : NeonInstAlias<!strconcat(asmop, " $Rd," # asmlane # ", $Imm"),
                        (inst VPRC:$Rd, neon_uimm8:$Imm,  0), 0b0>;
1324
// Instantiate the shift-less assembly aliases for every shifted-immediate
// instruction defined above (2S/4H use 64-bit registers, 4S/8H 128-bit).

// Aliases for Vector Move Immediate Shifted
def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;

// Aliases for Vector Move Inverted Immediate Shifted
def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;

// Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;

// Aliases for Vector Bitwise OR - immediate
def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
1348
//  Vector Move Immediate - per byte: replicate an 8-bit immediate into
//  every byte lane (cmode 1110, op 0).
let isReMaterializable = 1 in {
def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
                               (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
                               "movi\t$Rd.8b, $Imm",
                               [(set (v8i8 VPR64:$Rd),
                                  (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
                                NoItinerary> {
  let cmode = 0b1110;  // per-byte replication encoding
}

def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
                                (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
                                "movi\t$Rd.16b, $Imm",
                                [(set (v16i8 VPR128:$Rd),
                                   (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
                                 NoItinerary> {
  let cmode = 0b1110;  // per-byte replication encoding
}
}
1369
// Vector Move Immediate - bytemask, per double word: each bit of the
// immediate selects 0x00 or 0xff for one byte of each 64-bit lane
// (neon_uimm64_mask handles that encoding).
// NOTE(review): "movi\t $Rd.2d" has a stray space after the tab, unlike
// MOVIvi_8B/16B above -- confirm whether this is intentional.
let isReMaterializable = 1 in {
def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
                               (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
                               "movi\t $Rd.2d, $Imm",
                               [(set (v2i64 VPR128:$Rd),
                                  (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
                               NoItinerary> {
  let cmode = 0b1110;  // bytemask encoding (with op = 1)
}
}
1381
// Vector Move Immediate - bytemask, one doubleword: same bytemask
// encoding as MOVIvi_2D but writing a single 64-bit FP register; the
// pattern accepts an f64 destination via bitconvert from v1i64.

let isReMaterializable = 1 in {
def MOVIdi : NeonI_1VModImm<0b0, 0b1,
                           (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
                           "movi\t $Rd, $Imm",
                           [(set (f64 FPR64:$Rd),
                              (f64 (bitconvert
                                (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))))],
                           NoItinerary> {
  let cmode = 0b1110;  // bytemask encoding (with op = 1)
}
}
1395
// Vector Floating Point Move Immediate: broadcast an 8-bit encoded FP
// constant into every lane (cmode 1111; op selects 32- vs 64-bit lanes).

class NeonI_FMOV_impl<string asmlane, RegisterClass VPRC, ValueType OpTy,
                      Operand immOpType, bit q, bit op>
  : NeonI_1VModImm<q, op,
                   (outs VPRC:$Rd), (ins immOpType:$Imm),
                   "fmov\t$Rd" # asmlane # ", $Imm",
                   [(set (OpTy VPRC:$Rd),
                      (OpTy (Neon_fmovi (timm:$Imm))))],
                   NoItinerary> {
     let cmode = 0b1111;  // FP immediate encoding
   }
1408
// FMOV (vector, immediate) for the three FP lane configurations.
let isReMaterializable = 1 in {
def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64,  v2f32, fmov32_operand, 0b0, 0b0>;
def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
}
1414
// Vector Shift (Immediate)

// Shift amount for 64-bit element shifts: an unsigned 6-bit immediate
// (0-63), parsed by the generic uimm6 asm operand class.
def imm0_63 : Operand<i32> {
  let ParserMatchClass = uimm6_asmoperand;
}
1420
// Vector shift-left by immediate.  The DAG-level shift amount is a
// splatted immediate (Neon_dupImm), matched against the instruction's
// scalar immediate operand.
class N2VShiftLeft<bit q, bit u, bits<5> opcode, string asmop, string T,
                   RegisterClass VPRC, ValueType Ty, Operand ImmTy>
  : NeonI_2VShiftImm<q, u, opcode,
                     (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
                     asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
                     [(set (Ty VPRC:$Rd),
                        (Ty (shl (Ty VPRC:$Rn), 
                          (Ty (Neon_dupImm (i32 imm:$Imm))))))],
                     NoItinerary>;
1430
// Shift-left by immediate for every integer lane configuration.  The
// element size is encoded in the leading one of immh (Inst{22-19});
// the remaining immh:immb bits hold the shift amount, hence the
// per-size immediate operand (uimm3 for bytes ... imm0_63 for 2D).
multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
  // 64-bit vector types.
  def _8B  : N2VShiftLeft<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3> {
    let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
  }

  def _4H  : N2VShiftLeft<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4> {
    let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
  }

  def _2S  : N2VShiftLeft<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5> {
    let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
  }

  // 128-bit vector types.
  def _16B : N2VShiftLeft<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3> {
    let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
  }

  def _8H : N2VShiftLeft<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4> {
    let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
  }

  def _4S : N2VShiftLeft<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5> {
    let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
  }

  def _2D : N2VShiftLeft<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63> {
    let Inst{22} = 0b1;        // immh:immb = 1xxxxxx
  }
}
1462
// PatFrags extracting the high half of a 128-bit vector (lanes 8.., 4..,
// 2.. respectively), used to match the "2" (second-part) long forms.
def Neon_top16B : PatFrag<(ops node:$in), 
                          (extract_subvector (v16i8 node:$in), (iPTR 8))>;
def Neon_top8H : PatFrag<(ops node:$in), 
                         (extract_subvector (v8i16 node:$in), (iPTR 4))>;
def Neon_top4S : PatFrag<(ops node:$in), 
                         (extract_subvector (v4i32 node:$in), (iPTR 2))>;
1469
// Shift-left-long by immediate: widen the 64-bit source (ExtOp = sext or
// zext), then shift left by the splatted immediate.
class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
                   string SrcT, ValueType DestTy, ValueType SrcTy,
                   Operand ImmTy, SDPatternOperator ExtOp>
  : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
                     (ins VPR64:$Rn, ImmTy:$Imm),
                     asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
                     [(set (DestTy VPR128:$Rd),
                        (DestTy (shl 
                          (DestTy (ExtOp (SrcTy VPR64:$Rn))),
                            (DestTy (Neon_dupImm (i32 imm:$Imm))))))],
                     NoItinerary>;
1481
// Second-part shift-left-long ("<asmop>2"): widen the high half of a
// 128-bit source (selected by the getTop PatFrag), then shift left.
// NOTE(review): the StartIndex parameter is not used in this body; the
// extract index is encoded in getTop instead -- confirm it can be
// dropped at the instantiation sites.
class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
                       string SrcT, ValueType DestTy, ValueType SrcTy, 
                       int StartIndex, Operand ImmTy,
                       SDPatternOperator ExtOp, PatFrag getTop>
  : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
                     (ins VPR128:$Rn, ImmTy:$Imm),
                     asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
                     [(set (DestTy VPR128:$Rd), 
                        (DestTy (shl
                          (DestTy (ExtOp 
                            (SrcTy (getTop VPR128:$Rn)))),
                              (DestTy (Neon_dupImm (i32 imm:$Imm))))))],
                     NoItinerary>;
1495
// Shift-left-long by immediate for every narrowing pair, plus patterns
// selecting plain sext/zext (shift amount 0) to the same instructions.
// As in NeonI_N2VShL, the element size is encoded by the leading one of
// immh (Inst{22-19}).
multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
                         SDNode ExtOp> {
  // 64-bit vector types.
  def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
                         uimm3, ExtOp>{
    let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
  }

  def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
                         uimm4, ExtOp>{
    let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
  }

  def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
                         uimm5, ExtOp>{
    let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
  }

  // 128-bit vector types
  def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b",
                              v8i16, v8i8, 8, uimm3, ExtOp, Neon_top16B>{
    let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
  }

  def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h",
                             v4i32, v4i16, 4, uimm4, ExtOp, Neon_top8H>{
    let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
  }

  def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s",
                             v2i64, v2i32, 2, uimm5, ExtOp, Neon_top4S>{
    let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
  }

  // Use other patterns to match when the immediate is 0, i.e. a bare
  // extension becomes "<asmop> ..., #0".
  def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
            (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;

  def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
            (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;

  def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
            (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;

  def : Pat<(v8i16 (ExtOp (v8i8 (Neon_top16B VPR128:$Rn)))),
            (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;

  def : Pat<(v4i32 (ExtOp (v4i16 (Neon_top8H VPR128:$Rn)))),
            (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;

  def : Pat<(v2i64 (ExtOp (v2i32 (Neon_top4S VPR128:$Rn)))),
            (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
}
1549
// Shift left immediate
defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;

// Shift left long immediate (signed widens with sext, unsigned with zext)
defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
1556
// Scalar Arithmetic

// Scalar three-same instruction operating only on D (64-bit) registers.
// No ISel pattern here; selection is done by the Neon_Scalar_D_size
// patterns below.
class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
  : NeonI_Scalar3Same<u, 0b11, opcode,
                (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
                !strconcat(asmop, " $Rd, $Rn, $Rm"),
                [],
                NoItinerary>;
1565
// Scalar three-same instruction at all four element sizes (B/H/S/D);
// the size field (bits 23-22) selects the width.  Patterns are attached
// separately (D-size only, via Neon_Scalar_D_size_patterns).
multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
                                        string asmop, bit Commutable = 0>
{
  let isCommutable = Commutable in {
    def bbb : NeonI_Scalar3Same<u, 0b00, opcode,
                                (outs FPR8:$Rd), (ins FPR8:$Rn, FPR8:$Rm),
                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
                                [],
                                NoItinerary>;
    def hhh : NeonI_Scalar3Same<u, 0b01, opcode,
                                (outs FPR16:$Rd), (ins FPR16:$Rn, FPR16:$Rm),
                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
                                [],
                                NoItinerary>;
    def sss : NeonI_Scalar3Same<u, 0b10, opcode,
                                (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm),
                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
                                [],
                                NoItinerary>;
    def ddd : NeonI_Scalar3Same<u, 0b11, opcode,
                               (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
                               !strconcat(asmop, " $Rd, $Rn, $Rm"),
                               [],
                               NoItinerary>;
  }
}
1592
// Select a v1i64 node to a D-register scalar instruction: move the
// operands from the vector class into FPR64 via EXTRACT_SUBREG, run
// INSTD, and re-wrap the FPR64 result as a vector register with
// SUBREG_TO_REG.
class Neon_Scalar_D_size_patterns<SDPatternOperator opnode, Instruction INSTD>
  : Pat<(v1i64 (opnode (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
        (SUBREG_TO_REG (i64 0),
              (INSTD (EXTRACT_SUBREG VPR64:$Rn, sub_64),
             (EXTRACT_SUBREG VPR64:$Rm, sub_64)),
          sub_64)>;
1599
1600
// Scalar Integer Add
let isCommutable = 1 in {
def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
}

// Scalar Integer Sub
def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;

// Pattern for Scalar Integer Add and Sub with D register
def : Neon_Scalar_D_size_patterns<add, ADDddd>;
def : Neon_Scalar_D_size_patterns<sub, SUBddd>;
1612
// Scalar Integer Saturating Add (Signed, Unsigned); adds are commutable
defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;

// Scalar Integer Saturating Sub (Signed, Unsigned)
defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;

// Patterns for Scalar Integer Saturating Add, Sub with D register only;
// the ARM NEON saturating intrinsics are shared with the AArch32 backend.
def : Neon_Scalar_D_size_patterns<int_arm_neon_vqadds, SQADDddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vqaddu, UQADDddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubs, SQSUBddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubu, UQSUBddd>;
1626
// Scalar Integer Shift Left (Signed, Unsigned)
def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;

// Scalar Integer Saturating Shift Left (Signed, Unsigned)
defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;

// Scalar Integer Rounding Shift Left (Signed, Unsigned)
def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;

// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
1642
// Patterns for Scalar Integer Shift Left, Saturating Shift Left,
// Rounding Shift Left, Rounding Saturating Shift Left with D register only
def : Neon_Scalar_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
// A generic ISD::SHL has no signedness, so map it to SSHLddd only.
// (A second identical pattern mapping shl to USHLddd was dead code:
// with equal pattern complexity the first pattern always wins.)
def : Neon_Scalar_D_size_patterns<shl, SSHLddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
def : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
1655
1656
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// 64-bit vector bitcasts: all 64-bit vector types share a register
// class, so a bitconvert between them selects to nothing (same register).

def : Pat<(v1i64 (bitconvert (v8i8  VPR64:$src))), (v1i64 VPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  VPR64:$src))), (v2f32 VPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  VPR64:$src))), (v2i32 VPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  VPR64:$src))), (v4i16 VPR64:$src)>;

def : Pat<(v1i64 (bitconvert (v4i16  VPR64:$src))), (v1i64 VPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16  VPR64:$src))), (v2i32 VPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16  VPR64:$src))), (v2f32 VPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16  VPR64:$src))), (v8i8 VPR64:$src)>;

def : Pat<(v1i64 (bitconvert (v2i32  VPR64:$src))), (v1i64 VPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v2i32  VPR64:$src))), (v2f32 VPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32  VPR64:$src))), (v4i16 VPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32  VPR64:$src))), (v8i8 VPR64:$src)>;

def : Pat<(v1i64 (bitconvert (v2f32  VPR64:$src))), (v1i64 VPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v2f32  VPR64:$src))), (v2i32 VPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32  VPR64:$src))), (v4i16 VPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2f32  VPR64:$src))), (v8i8 VPR64:$src)>;

def : Pat<(v2f32 (bitconvert (v1i64  VPR64:$src))), (v2f32 VPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
1687
// ..and 128-bit vector bitcasts, likewise no-ops within VPR128.

def : Pat<(v2f64 (bitconvert (v16i8  VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8  VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8  VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8  VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8  VPR128:$src))), (v8i16 VPR128:$src)>;

def : Pat<(v2f64 (bitconvert (v8i16  VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16  VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16  VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16  VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16  VPR128:$src))), (v16i8 VPR128:$src)>;

def : Pat<(v2f64 (bitconvert (v4i32  VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32  VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32  VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32  VPR128:$src))), (v8i16 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32  VPR128:$src))), (v16i8 VPR128:$src)>;

def : Pat<(v2f64 (bitconvert (v4f32  VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32  VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32  VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32  VPR128:$src))), (v8i16 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32  VPR128:$src))), (v16i8 VPR128:$src)>;

def : Pat<(v2f64 (bitconvert (v2i64  VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64  VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64  VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64  VPR128:$src))), (v8i16 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64  VPR128:$src))), (v16i8 VPR128:$src)>;

def : Pat<(v2i64 (bitconvert (v2f64  VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64  VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64  VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64  VPR128:$src))), (v8i16 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64  VPR128:$src))), (v16i8 VPR128:$src)>;
1725
1726
// ...and scalar bitcasts: a vector-to-scalar bitconvert is a register
// view change.  64-bit vectors expose their f64 value via the sub_64
// subregister; 128-bit vectors expose f128 via sub_alias.

def : Pat<(f64   (bitconvert (v8i8  VPR64:$src))),
                 (f64 (EXTRACT_SUBREG (v8i8  VPR64:$src), sub_64))>;
def : Pat<(f64   (bitconvert (v4i16  VPR64:$src))),
                 (f64 (EXTRACT_SUBREG (v4i16  VPR64:$src), sub_64))>;
def : Pat<(f64   (bitconvert (v2i32  VPR64:$src))),
                 (f64 (EXTRACT_SUBREG (v2i32  VPR64:$src), sub_64))>;
def : Pat<(f64   (bitconvert (v2f32  VPR64:$src))),
                 (f64 (EXTRACT_SUBREG (v2f32  VPR64:$src), sub_64))>;
def : Pat<(f64   (bitconvert (v1i64  VPR64:$src))),
                 (f64 (EXTRACT_SUBREG (v1i64  VPR64:$src), sub_64))>;
def : Pat<(f128  (bitconvert (v16i8  VPR128:$src))),
                 (f128 (EXTRACT_SUBREG (v16i8  VPR128:$src), sub_alias))>;
def : Pat<(f128  (bitconvert (v8i16  VPR128:$src))),
                 (f128 (EXTRACT_SUBREG (v8i16  VPR128:$src), sub_alias))>;
def : Pat<(f128  (bitconvert (v4i32  VPR128:$src))),
                 (f128 (EXTRACT_SUBREG (v4i32  VPR128:$src), sub_alias))>;
def : Pat<(f128  (bitconvert (v2i64  VPR128:$src))),
                 (f128 (EXTRACT_SUBREG (v2i64  VPR128:$src), sub_alias))>;
def : Pat<(f128  (bitconvert (v4f32  VPR128:$src))),
                 (f128 (EXTRACT_SUBREG (v4f32  VPR128:$src), sub_alias))>;
def : Pat<(f128  (bitconvert (v2f64  VPR128:$src))),
                 (f128 (EXTRACT_SUBREG (v2f64  VPR128:$src), sub_alias))>;
1751
// ...and the reverse direction: view a scalar FP register as a vector
// register, implemented with SUBREG_TO_REG (the scalar occupies the
// sub_64 / sub_alias subregister of the vector class).
def : Pat<(v8i8   (bitconvert (f64   FPR64:$src))),
                  (v8i8 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
def : Pat<(v4i16  (bitconvert (f64   FPR64:$src))),
                  (v4i16 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
def : Pat<(v2i32  (bitconvert (f64   FPR64:$src))),
                  (v2i32 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
def : Pat<(v2f32  (bitconvert (f64   FPR64:$src))),
                  (v2f32 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
def : Pat<(v1i64  (bitconvert (f64   FPR64:$src))),
                  (v1i64 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
def : Pat<(v16i8  (bitconvert (f128   FPR128:$src))),
                  (v16i8 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
                  sub_alias))>;
def : Pat<(v8i16  (bitconvert (f128   FPR128:$src))),
                  (v8i16 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
                  sub_alias))>;
def : Pat<(v4i32  (bitconvert (f128   FPR128:$src))),
                  (v4i32 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
                  sub_alias))>;
def : Pat<(v2i64  (bitconvert (f128   FPR128:$src))),
                  (v2i64 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
                  sub_alias))>;
def : Pat<(v4f32  (bitconvert (f128   FPR128:$src))),
                  (v4f32 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
                  sub_alias))>;
def : Pat<(v2f64  (bitconvert (f128   FPR128:$src))),
                  (v2f64 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
                  sub_alias))>;