1 //===- ARMInstrNEON.td - NEON support for ARM -----------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM NEON instruction set.
12 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // NEON-specific Operands.
17 //===----------------------------------------------------------------------===//
// NEON modified-immediate operand (the "cmode"-encoded immediate used by
// VMOV/VMVN/VORR/VBIC and friends).
def nModImm : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
}

// Per-size splat immediates, each paired with an AsmOperandClass so the asm
// parser can range-check the value for that element size.
def nImmSplatI8AsmOperand : AsmOperandClass { let Name = "NEONi8splat"; }
def nImmSplatI8 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmSplatI8AsmOperand;
}
def nImmSplatI16AsmOperand : AsmOperandClass { let Name = "NEONi16splat"; }
def nImmSplatI16 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmSplatI16AsmOperand;
}
def nImmSplatI32AsmOperand : AsmOperandClass { let Name = "NEONi32splat"; }
def nImmSplatI32 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmSplatI32AsmOperand;
}
def nImmVMOVI32AsmOperand : AsmOperandClass { let Name = "NEONi32vmov"; }
def nImmVMOVI32 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmVMOVI32AsmOperand;
}
// VMOV.f32 immediate: printed/parsed as an FP immediate, not a modified imm.
def nImmVMOVF32 : Operand<i32> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}
def nImmSplatI64AsmOperand : AsmOperandClass { let Name = "NEONi64splat"; }
def nImmSplatI64 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmSplatI64AsmOperand;
}
// Vector lane-index operands. The ImmLeaf predicates bound the index by the
// number of lanes for each element size (8 lanes of i8, 4 of i16, 2 of i32
// per D register).
def VectorIndex8Operand  : AsmOperandClass { let Name = "VectorIndex8"; }
def VectorIndex16Operand : AsmOperandClass { let Name = "VectorIndex16"; }
def VectorIndex32Operand : AsmOperandClass { let Name = "VectorIndex32"; }
def VectorIndex8 : Operand<i32>, ImmLeaf<i32, [{
  return ((uint64_t)Imm) < 8;
}]> {
  let ParserMatchClass = VectorIndex8Operand;
  let PrintMethod = "printVectorIndex";
  let MIOperandInfo = (ops i32imm);
}
def VectorIndex16 : Operand<i32>, ImmLeaf<i32, [{
  return ((uint64_t)Imm) < 4;
}]> {
  let ParserMatchClass = VectorIndex16Operand;
  let PrintMethod = "printVectorIndex";
  let MIOperandInfo = (ops i32imm);
}
def VectorIndex32 : Operand<i32>, ImmLeaf<i32, [{
  return ((uint64_t)Imm) < 2;
}]> {
  let ParserMatchClass = VectorIndex32Operand;
  let PrintMethod = "printVectorIndex";
  let MIOperandInfo = (ops i32imm);
}
// Register-list operands for the vector load/store instructions; all are
// parsed by the common parseVectorList hook.
// Register list of one D register.
def VecListOneDAsmOperand : AsmOperandClass {
  let Name = "VecListOneD";
  let ParserMethod = "parseVectorList";
}
def VecListOneD : RegisterOperand<DPR, "printVectorListOne"> {
  let ParserMatchClass = VecListOneDAsmOperand;
}
// Register list of two sequential D registers.
def VecListTwoDAsmOperand : AsmOperandClass {
  let Name = "VecListTwoD";
  let ParserMethod = "parseVectorList";
}
def VecListTwoD : RegisterOperand<DPR, "printVectorListTwo"> {
  let ParserMatchClass = VecListTwoDAsmOperand;
}
// Register list of three sequential D registers.
def VecListThreeDAsmOperand : AsmOperandClass {
  let Name = "VecListThreeD";
  let ParserMethod = "parseVectorList";
}
def VecListThreeD : RegisterOperand<DPR, "printVectorListThree"> {
  let ParserMatchClass = VecListThreeDAsmOperand;
}
// Register list of four sequential D registers.
def VecListFourDAsmOperand : AsmOperandClass {
  let Name = "VecListFourD";
  let ParserMethod = "parseVectorList";
}
def VecListFourD : RegisterOperand<DPR, "printVectorListFour"> {
  let ParserMatchClass = VecListFourDAsmOperand;
}
// Register list of two D registers spaced by 2 (two sequential Q registers).
def VecListTwoQAsmOperand : AsmOperandClass {
  let Name = "VecListTwoQ";
  let ParserMethod = "parseVectorList";
}
def VecListTwoQ : RegisterOperand<DPR, "printVectorListTwo"> {
  let ParserMatchClass = VecListTwoQAsmOperand;
}
117 //===----------------------------------------------------------------------===//
118 // NEON-specific DAG Nodes.
119 //===----------------------------------------------------------------------===//
// Type profiles for the NEON vector-compare nodes: VCMP takes two operands of
// the same type and produces an integer mask vector; VCMPZ compares a single
// operand against zero (no type constraints beyond 1 result / 1 operand).
def SDTARMVCMP : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<1, 2>]>;
def SDTARMVCMPZ : SDTypeProfile<1, 1, []>;

// Compare equal / greater-or-equal / greater-than, plus the ...Z forms that
// compare against zero and the ...u forms for unsigned comparisons.
def NEONvceq : SDNode<"ARMISD::VCEQ", SDTARMVCMP>;
def NEONvceqz : SDNode<"ARMISD::VCEQZ", SDTARMVCMPZ>;
def NEONvcge : SDNode<"ARMISD::VCGE", SDTARMVCMP>;
def NEONvcgez : SDNode<"ARMISD::VCGEZ", SDTARMVCMPZ>;
def NEONvclez : SDNode<"ARMISD::VCLEZ", SDTARMVCMPZ>;
def NEONvcgeu : SDNode<"ARMISD::VCGEU", SDTARMVCMP>;
def NEONvcgt : SDNode<"ARMISD::VCGT", SDTARMVCMP>;
def NEONvcgtz : SDNode<"ARMISD::VCGTZ", SDTARMVCMPZ>;
def NEONvcltz : SDNode<"ARMISD::VCLTZ", SDTARMVCMPZ>;
def NEONvcgtu : SDNode<"ARMISD::VCGTU", SDTARMVCMP>;
// VTST: bitwise test (lane-wise "(a & b) != 0" mask).
def NEONvtst : SDNode<"ARMISD::VTST", SDTARMVCMP>;
// Types for vector shift by immediates. The "SHX" version is for long and
// narrow operations where the source and destination vectors have different
// types. The "SHINS" version is for shift and insert operations.
def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                     SDTCisVT<2, i32>]>;
def SDTARMVSHX : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                      SDTCisVT<2, i32>]>;
def SDTARMVSHINS : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                        SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;

// Plain shifts (left, signed/unsigned right).
def NEONvshl : SDNode<"ARMISD::VSHL", SDTARMVSH>;
def NEONvshrs : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
def NEONvshru : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
// Widening shift-left (signed/unsigned/same-size-imm) and narrowing
// shift-right.
def NEONvshlls : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
def NEONvshllu : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
def NEONvshlli : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
def NEONvshrn : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;

// Rounding shifts.
def NEONvrshrs : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
def NEONvrshru : SDNode<"ARMISD::VRSHRu", SDTARMVSH>;
def NEONvrshrn : SDNode<"ARMISD::VRSHRN", SDTARMVSHX>;

// Saturating shifts.
def NEONvqshls : SDNode<"ARMISD::VQSHLs", SDTARMVSH>;
def NEONvqshlu : SDNode<"ARMISD::VQSHLu", SDTARMVSH>;
def NEONvqshlsu : SDNode<"ARMISD::VQSHLsu", SDTARMVSH>;
def NEONvqshrns : SDNode<"ARMISD::VQSHRNs", SDTARMVSHX>;
def NEONvqshrnu : SDNode<"ARMISD::VQSHRNu", SDTARMVSHX>;
def NEONvqshrnsu : SDNode<"ARMISD::VQSHRNsu", SDTARMVSHX>;

// Saturating rounding narrowing shifts.
def NEONvqrshrns : SDNode<"ARMISD::VQRSHRNs", SDTARMVSHX>;
def NEONvqrshrnu : SDNode<"ARMISD::VQRSHRNu", SDTARMVSHX>;
def NEONvqrshrnsu : SDNode<"ARMISD::VQRSHRNsu", SDTARMVSHX>;

// Shift-and-insert (VSLI/VSRI).
def NEONvsli : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
def NEONvsri : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;
// Lane extraction to a GPR: i32 result, vector operand, i32 lane index.
def SDTARMVGETLN : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                        SDTCisVT<2, i32>]>;
def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;

// Vector built from an encoded modified-immediate (i32 operand).
def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
def NEONvmovImm : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
def NEONvmvnImm : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
def NEONvmovFPImm : SDNode<"ARMISD::VMOVFPIMM", SDTARMVMOVIMM>;

// OR/BIC with an encoded modified-immediate.
def SDTARMVORRIMM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                         SDTCisVT<2, i32>]>;
def NEONvorrImm : SDNode<"ARMISD::VORRIMM", SDTARMVORRIMM>;
def NEONvbicImm : SDNode<"ARMISD::VBICIMM", SDTARMVORRIMM>;

// Bitwise select: all three operands and the result share one vector type.
def NEONvbsl : SDNode<"ARMISD::VBSL",
                      SDTypeProfile<1, 3, [SDTCisVec<0>,
                                           SDTCisSameAs<0, 1>,
                                           SDTCisSameAs<0, 2>,
                                           SDTCisSameAs<0, 3>]>>;
def NEONvdup : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;

// VDUPLANE can produce a quad-register result from a double-register source,
// so the result is not constrained to match the source.
def NEONvduplane : SDNode<"ARMISD::VDUPLANE",
                          SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                               SDTCisVT<2, i32>]>>;

// VEXT: extract a vector from a pair, at an immediate byte offset.
def SDTARMVEXT : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                      SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;
def NEONvext : SDNode<"ARMISD::VEXT", SDTARMVEXT>;

// Single-operand shuffles (element reversal within 64/32/16-bit groups).
def SDTARMVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
def NEONvrev64 : SDNode<"ARMISD::VREV64", SDTARMVSHUF>;
def NEONvrev32 : SDNode<"ARMISD::VREV32", SDTARMVSHUF>;
def NEONvrev16 : SDNode<"ARMISD::VREV16", SDTARMVSHUF>;

// Two-result shuffles (VZIP/VUZP/VTRN): two inputs, two outputs, all the
// same vector type.
def SDTARMVSHUF2 : SDTypeProfile<2, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                        SDTCisSameAs<0, 2>,
                                        SDTCisSameAs<0, 3>]>;
def NEONzip : SDNode<"ARMISD::VZIP", SDTARMVSHUF2>;
def NEONuzp : SDNode<"ARMISD::VUZP", SDTARMVSHUF2>;
def NEONtrn : SDNode<"ARMISD::VTRN", SDTARMVSHUF2>;

// Widening multiplies: result type differs from the (matching) operand types.
def SDTARMVMULL : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                       SDTCisSameAs<1, 2>]>;
def NEONvmulls : SDNode<"ARMISD::VMULLs", SDTARMVMULL>;
def NEONvmullu : SDNode<"ARMISD::VMULLu", SDTARMVMULL>;

// Scalar f32 max/min.
def SDTARMFMAX : SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisSameAs<0, 1>,
                                      SDTCisSameAs<0, 2>]>;
def NEONfmax : SDNode<"ARMISD::FMAX", SDTARMFMAX>;
def NEONfmin : SDNode<"ARMISD::FMIN", SDTARMFMAX>;
// Matches a VMOVIMM whose decoded modified-immediate is a 32-bit zero splat,
// i.e. the all-zeros vector.
def NEONimmAllZerosV: PatLeaf<(NEONvmovImm (i32 timm)), [{
  ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
  unsigned EltBits = 0;
  uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
  return (EltBits == 32 && EltVal == 0);
}]>;

// Matches a VMOVIMM whose decoded modified-immediate is an 8-bit 0xff splat,
// i.e. the all-ones vector.
def NEONimmAllOnesV: PatLeaf<(NEONvmovImm (i32 timm)), [{
  ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
  unsigned EltBits = 0;
  uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
  return (EltBits == 8 && EltVal == 0xff);
}]>;
241 //===----------------------------------------------------------------------===//
242 // NEON load / store instructions
243 //===----------------------------------------------------------------------===//
// Use VLDM to load a Q register as a D register pair.
// This is a pseudo instruction that is expanded to VLDMD after reg alloc.
def VLDMQIA
  : PseudoVFPLdStM<(outs QPR:$dst), (ins GPR:$Rn),
                   IIC_fpLoad_m, "",
                   [(set QPR:$dst, (v2f64 (load GPR:$Rn)))]>;

// Use VSTM to store a Q register as a D register pair.
// This is a pseudo instruction that is expanded to VSTMD after reg alloc.
def VSTMQIA
  : PseudoVFPLdStM<(outs), (ins QPR:$src, GPR:$Rn),
                   IIC_fpStore_m, "",
                   [(store (v2f64 QPR:$src), GPR:$Rn)]>;
// Classes for VLD* pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
class VLDQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst), (ins addrmode6:$addr), itin, "">;
class VLDQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset), itin,
                "$addr.addr = $wb">;
class VLDQWBfixedPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
                (ins addrmode6:$addr), itin,
                "$addr.addr = $wb">;
class VLDQWBregisterPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, rGPR:$offset), itin,
                "$addr.addr = $wb">;
class VLDQQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQPR:$dst), (ins addrmode6:$addr), itin, "">;
class VLDQQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset), itin,
                "$addr.addr = $wb">;
class VLDQQQQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQQQPR:$dst), (ins addrmode6:$addr, QQQQPR:$src),itin,
                "$src = $dst">;
class VLDQQQQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQQQPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
                "$addr.addr = $wb, $src = $dst">;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {

// VLD1 : Vector Load (multiple single elements)
class VLD1D<bits<4> op7_4, string Dt>
  : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd),
          (ins addrmode6:$Rn), IIC_VLD1,
          "vld1", Dt, "$Vd, $Rn", "", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLDInstruction";
}
class VLD1Q<bits<4> op7_4, string Dt>
  : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd),
          (ins addrmode6:$Rn), IIC_VLD1x2,
          "vld1", Dt, "$Vd, $Rn", "", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVLDInstruction";
}

def VLD1d8  : VLD1D<{0,0,0,?}, "8">;
def VLD1d16 : VLD1D<{0,1,0,?}, "16">;
def VLD1d32 : VLD1D<{1,0,0,?}, "32">;
def VLD1d64 : VLD1D<{1,1,0,?}, "64">;

def VLD1q8  : VLD1Q<{0,0,?,?}, "8">;
def VLD1q16 : VLD1Q<{0,1,?,?}, "16">;
def VLD1q32 : VLD1Q<{1,0,?,?}, "32">;
def VLD1q64 : VLD1Q<{1,1,?,?}, "64">;

def VLD1q8Pseudo  : VLDQPseudo<IIC_VLD1x2>;
def VLD1q16Pseudo : VLDQPseudo<IIC_VLD1x2>;
def VLD1q32Pseudo : VLDQPseudo<IIC_VLD1x2>;
def VLD1q64Pseudo : VLDQPseudo<IIC_VLD1x2>;
// ...with address register writeback:
multiclass VLD1DWB<bits<4> op7_4, string Dt> {
  def _fixed : NLdSt<0,0b10, 0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
                     (ins addrmode6:$Rn), IIC_VLD1u,
                     "vld1", Dt, "$Vd, $Rn!",
                     "$Rn.addr = $wb", []> {
    let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
    let Inst{4} = Rn{4};
    let DecoderMethod = "DecodeVLDInstruction";
    let AsmMatchConverter = "cvtVLDwbFixed";
  }
  def _register : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
                        (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1u,
                        "vld1", Dt, "$Vd, $Rn, $Rm",
                        "$Rn.addr = $wb", []> {
    let Inst{4} = Rn{4};
    let DecoderMethod = "DecodeVLDInstruction";
    let AsmMatchConverter = "cvtVLDwbRegister";
  }
}
multiclass VLD1QWB<bits<4> op7_4, string Dt> {
  def _fixed : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd, GPR:$wb),
                     (ins addrmode6:$Rn), IIC_VLD1x2u,
                     "vld1", Dt, "$Vd, $Rn!",
                     "$Rn.addr = $wb", []> {
    let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVLDInstruction";
    let AsmMatchConverter = "cvtVLDwbFixed";
  }
  def _register : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd, GPR:$wb),
                        (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
                        "vld1", Dt, "$Vd, $Rn, $Rm",
                        "$Rn.addr = $wb", []> {
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVLDInstruction";
    let AsmMatchConverter = "cvtVLDwbRegister";
  }
}

defm VLD1d8wb  : VLD1DWB<{0,0,0,?}, "8">;
defm VLD1d16wb : VLD1DWB<{0,1,0,?}, "16">;
defm VLD1d32wb : VLD1DWB<{1,0,0,?}, "32">;
defm VLD1d64wb : VLD1DWB<{1,1,0,?}, "64">;
defm VLD1q8wb  : VLD1QWB<{0,0,?,?}, "8">;
defm VLD1q16wb : VLD1QWB<{0,1,?,?}, "16">;
defm VLD1q32wb : VLD1QWB<{1,0,?,?}, "32">;
defm VLD1q64wb : VLD1QWB<{1,1,?,?}, "64">;

def VLD1q8PseudoWB_fixed  : VLDQWBfixedPseudo<IIC_VLD1x2u>;
def VLD1q16PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
def VLD1q32PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
def VLD1q64PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
def VLD1q8PseudoWB_register  : VLDQWBregisterPseudo<IIC_VLD1x2u>;
def VLD1q16PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
def VLD1q32PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
def VLD1q64PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
// ...with 3 registers
class VLD1D3<bits<4> op7_4, string Dt>
  : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd),
          (ins addrmode6:$Rn), IIC_VLD1x3, "vld1", Dt,
          "$Vd, $Rn", "", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLDInstruction";
}
multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
  def _fixed : NLdSt<0,0b10,0b0110, op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
                     (ins addrmode6:$Rn), IIC_VLD1x2u,
                     "vld1", Dt, "$Vd, $Rn!",
                     "$Rn.addr = $wb", []> {
    let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
    let Inst{4} = Rn{4};
    let DecoderMethod = "DecodeVLDInstruction";
    let AsmMatchConverter = "cvtVLDwbFixed";
  }
  def _register : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
                        (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
                        "vld1", Dt, "$Vd, $Rn, $Rm",
                        "$Rn.addr = $wb", []> {
    let Inst{4} = Rn{4};
    let DecoderMethod = "DecodeVLDInstruction";
    let AsmMatchConverter = "cvtVLDwbRegister";
  }
}

def VLD1d8T  : VLD1D3<{0,0,0,?}, "8">;
def VLD1d16T : VLD1D3<{0,1,0,?}, "16">;
def VLD1d32T : VLD1D3<{1,0,0,?}, "32">;
def VLD1d64T : VLD1D3<{1,1,0,?}, "64">;

defm VLD1d8Twb  : VLD1D3WB<{0,0,0,?}, "8">;
defm VLD1d16Twb : VLD1D3WB<{0,1,0,?}, "16">;
defm VLD1d32Twb : VLD1D3WB<{1,0,0,?}, "32">;
defm VLD1d64Twb : VLD1D3WB<{1,1,0,?}, "64">;

def VLD1d64TPseudo : VLDQQPseudo<IIC_VLD1x3>;
// ...with 4 registers
class VLD1D4<bits<4> op7_4, string Dt>
  : NLdSt<0, 0b10, 0b0010, op7_4, (outs VecListFourD:$Vd),
          (ins addrmode6:$Rn), IIC_VLD1x4, "vld1", Dt,
          "$Vd, $Rn", "", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVLDInstruction";
}
multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
  def _fixed : NLdSt<0,0b10,0b0010, op7_4, (outs VecListFourD:$Vd, GPR:$wb),
                     (ins addrmode6:$Rn), IIC_VLD1x2u,
                     "vld1", Dt, "$Vd, $Rn!",
                     "$Rn.addr = $wb", []> {
    let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVLDInstruction";
    let AsmMatchConverter = "cvtVLDwbFixed";
  }
  def _register : NLdSt<0,0b10,0b0010,op7_4, (outs VecListFourD:$Vd, GPR:$wb),
                        (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
                        "vld1", Dt, "$Vd, $Rn, $Rm",
                        "$Rn.addr = $wb", []> {
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVLDInstruction";
    let AsmMatchConverter = "cvtVLDwbRegister";
  }
}

def VLD1d8Q  : VLD1D4<{0,0,?,?}, "8">;
def VLD1d16Q : VLD1D4<{0,1,?,?}, "16">;
def VLD1d32Q : VLD1D4<{1,0,?,?}, "32">;
def VLD1d64Q : VLD1D4<{1,1,?,?}, "64">;

defm VLD1d8Qwb  : VLD1D4WB<{0,0,?,?}, "8">;
defm VLD1d16Qwb : VLD1D4WB<{0,1,?,?}, "16">;
defm VLD1d32Qwb : VLD1D4WB<{1,0,?,?}, "32">;
defm VLD1d64Qwb : VLD1D4WB<{1,1,?,?}, "64">;

def VLD1d64QPseudo : VLDQQPseudo<IIC_VLD1x4>;
// VLD2 : Vector Load (multiple 2-element structures)
class VLD2D<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy>
  : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd),
          (ins addrmode6:$Rn), IIC_VLD2,
          "vld2", Dt, "$Vd, $Rn", "", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVLDInstruction";
}
class VLD2Q<bits<4> op7_4, string Dt, RegisterOperand VdTy>
  : NLdSt<0, 0b10, 0b0011, op7_4,
          (outs VdTy:$Vd),
          (ins addrmode6:$Rn), IIC_VLD2x2,
          "vld2", Dt, "$Vd, $Rn", "", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVLDInstruction";
}

def VLD2d8  : VLD2D<0b1000, {0,0,?,?}, "8", VecListTwoD>;
def VLD2d16 : VLD2D<0b1000, {0,1,?,?}, "16", VecListTwoD>;
def VLD2d32 : VLD2D<0b1000, {1,0,?,?}, "32", VecListTwoD>;

def VLD2q8  : VLD2Q<{0,0,?,?}, "8", VecListFourD>;
def VLD2q16 : VLD2Q<{0,1,?,?}, "16", VecListFourD>;
def VLD2q32 : VLD2Q<{1,0,?,?}, "32", VecListFourD>;

def VLD2d8Pseudo  : VLDQPseudo<IIC_VLD2>;
def VLD2d16Pseudo : VLDQPseudo<IIC_VLD2>;
def VLD2d32Pseudo : VLDQPseudo<IIC_VLD2>;

def VLD2q8Pseudo  : VLDQQPseudo<IIC_VLD2x2>;
def VLD2q16Pseudo : VLDQQPseudo<IIC_VLD2x2>;
def VLD2q32Pseudo : VLDQQPseudo<IIC_VLD2x2>;

// ...with address register writeback:
class VLD2DWB<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy>
  : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2u,
          "vld2", Dt, "$Vd, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVLDInstruction";
}
class VLD2QWB<bits<4> op7_4, string Dt, RegisterOperand VdTy>
  : NLdSt<0, 0b10, 0b0011, op7_4,
          (outs VdTy:$Vd, GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2x2u,
          "vld2", Dt, "$Vd, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVLDInstruction";
}

def VLD2d8_UPD  : VLD2DWB<0b1000, {0,0,?,?}, "8", VecListTwoD>;
def VLD2d16_UPD : VLD2DWB<0b1000, {0,1,?,?}, "16", VecListTwoD>;
def VLD2d32_UPD : VLD2DWB<0b1000, {1,0,?,?}, "32", VecListTwoD>;

def VLD2q8_UPD  : VLD2QWB<{0,0,?,?}, "8", VecListFourD>;
def VLD2q16_UPD : VLD2QWB<{0,1,?,?}, "16", VecListFourD>;
def VLD2q32_UPD : VLD2QWB<{1,0,?,?}, "32", VecListFourD>;

def VLD2d8Pseudo_UPD  : VLDQWBPseudo<IIC_VLD2u>;
def VLD2d16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
def VLD2d32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;

def VLD2q8Pseudo_UPD  : VLDQQWBPseudo<IIC_VLD2x2u>;
def VLD2q16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
def VLD2q32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;

// ...with double-spaced registers
def VLD2b8      : VLD2D<0b1001, {0,0,?,?}, "8", VecListTwoQ>;
def VLD2b16     : VLD2D<0b1001, {0,1,?,?}, "16", VecListTwoQ>;
def VLD2b32     : VLD2D<0b1001, {1,0,?,?}, "32", VecListTwoQ>;
def VLD2b8_UPD  : VLD2DWB<0b1001, {0,0,?,?}, "8", VecListTwoQ>;
def VLD2b16_UPD : VLD2DWB<0b1001, {0,1,?,?}, "16", VecListTwoQ>;
def VLD2b32_UPD : VLD2DWB<0b1001, {1,0,?,?}, "32", VecListTwoQ>;
// VLD3 : Vector Load (multiple 3-element structures)
class VLD3D<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
          (ins addrmode6:$Rn), IIC_VLD3,
          "vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn", "", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLDInstruction";
}

def VLD3d8  : VLD3D<0b0100, {0,0,0,?}, "8">;
def VLD3d16 : VLD3D<0b0100, {0,1,0,?}, "16">;
def VLD3d32 : VLD3D<0b0100, {1,0,0,?}, "32">;

def VLD3d8Pseudo  : VLDQQPseudo<IIC_VLD3>;
def VLD3d16Pseudo : VLDQQPseudo<IIC_VLD3>;
def VLD3d32Pseudo : VLDQQPseudo<IIC_VLD3>;

// ...with address register writeback:
class VLD3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b10, op11_8, op7_4,
          (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD3u,
          "vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLDInstruction";
}

def VLD3d8_UPD  : VLD3DWB<0b0100, {0,0,0,?}, "8">;
def VLD3d16_UPD : VLD3DWB<0b0100, {0,1,0,?}, "16">;
def VLD3d32_UPD : VLD3DWB<0b0100, {1,0,0,?}, "32">;

def VLD3d8Pseudo_UPD  : VLDQQWBPseudo<IIC_VLD3u>;
def VLD3d16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
def VLD3d32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;

// ...with double-spaced registers:
def VLD3q8      : VLD3D<0b0101, {0,0,0,?}, "8">;
def VLD3q16     : VLD3D<0b0101, {0,1,0,?}, "16">;
def VLD3q32     : VLD3D<0b0101, {1,0,0,?}, "32">;
def VLD3q8_UPD  : VLD3DWB<0b0101, {0,0,0,?}, "8">;
def VLD3q16_UPD : VLD3DWB<0b0101, {0,1,0,?}, "16">;
def VLD3q32_UPD : VLD3DWB<0b0101, {1,0,0,?}, "32">;

def VLD3q8Pseudo_UPD  : VLDQQQQWBPseudo<IIC_VLD3u>;
def VLD3q16Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
def VLD3q32Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;

// ...alternate versions to be allocated odd register numbers:
def VLD3q8oddPseudo  : VLDQQQQPseudo<IIC_VLD3>;
def VLD3q16oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
def VLD3q32oddPseudo : VLDQQQQPseudo<IIC_VLD3>;

def VLD3q8oddPseudo_UPD  : VLDQQQQWBPseudo<IIC_VLD3u>;
def VLD3q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
def VLD3q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
// VLD4 : Vector Load (multiple 4-element structures)
class VLD4D<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b10, op11_8, op7_4,
          (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
          (ins addrmode6:$Rn), IIC_VLD4,
          "vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVLDInstruction";
}

def VLD4d8  : VLD4D<0b0000, {0,0,?,?}, "8">;
def VLD4d16 : VLD4D<0b0000, {0,1,?,?}, "16">;
def VLD4d32 : VLD4D<0b0000, {1,0,?,?}, "32">;

def VLD4d8Pseudo  : VLDQQPseudo<IIC_VLD4>;
def VLD4d16Pseudo : VLDQQPseudo<IIC_VLD4>;
def VLD4d32Pseudo : VLDQQPseudo<IIC_VLD4>;

// ...with address register writeback:
class VLD4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b10, op11_8, op7_4,
          (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD4u,
          "vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVLDInstruction";
}

def VLD4d8_UPD  : VLD4DWB<0b0000, {0,0,?,?}, "8">;
def VLD4d16_UPD : VLD4DWB<0b0000, {0,1,?,?}, "16">;
def VLD4d32_UPD : VLD4DWB<0b0000, {1,0,?,?}, "32">;

def VLD4d8Pseudo_UPD  : VLDQQWBPseudo<IIC_VLD4u>;
def VLD4d16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
def VLD4d32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;

// ...with double-spaced registers:
def VLD4q8      : VLD4D<0b0001, {0,0,?,?}, "8">;
def VLD4q16     : VLD4D<0b0001, {0,1,?,?}, "16">;
def VLD4q32     : VLD4D<0b0001, {1,0,?,?}, "32">;
def VLD4q8_UPD  : VLD4DWB<0b0001, {0,0,?,?}, "8">;
def VLD4q16_UPD : VLD4DWB<0b0001, {0,1,?,?}, "16">;
def VLD4q32_UPD : VLD4DWB<0b0001, {1,0,?,?}, "32">;

def VLD4q8Pseudo_UPD  : VLDQQQQWBPseudo<IIC_VLD4u>;
def VLD4q16Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
def VLD4q32Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;

// ...alternate versions to be allocated odd register numbers:
def VLD4q8oddPseudo  : VLDQQQQPseudo<IIC_VLD4>;
def VLD4q16oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
def VLD4q32oddPseudo : VLDQQQQPseudo<IIC_VLD4>;

def VLD4q8oddPseudo_UPD  : VLDQQQQWBPseudo<IIC_VLD4u>;
def VLD4q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
def VLD4q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;

} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
// Classes for VLD*LN pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
// Each takes the loaded-into register tuple as an extra $src input tied to
// $dst, since lane loads only modify one lane and preserve the rest.
// Q-register (two D regs) destination.
class VLDQLNPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QPR:$dst),
(ins addrmode6:$addr, QPR:$src, nohash_imm:$lane),
itin, "$src = $dst">;
// Q-register destination with address writeback.
class VLDQLNWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
(ins addrmode6:$addr, am6offset:$offset, QPR:$src,
nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
// QQ-register (four D regs) destination.
class VLDQQLNPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQPR:$dst),
(ins addrmode6:$addr, QQPR:$src, nohash_imm:$lane),
itin, "$src = $dst">;
// QQ-register destination with address writeback.
class VLDQQLNWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
(ins addrmode6:$addr, am6offset:$offset, QQPR:$src,
nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
// QQQQ-register (eight D regs) destination.
class VLDQQQQLNPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQQQPR:$dst),
(ins addrmode6:$addr, QQQQPR:$src, nohash_imm:$lane),
itin, "$src = $dst">;
// QQQQ-register destination with address writeback.
class VLDQQQQLNWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQQQPR:$dst, GPR:$wb),
(ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src,
nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
// VLD1LN : Vector Load (single element to one lane)
class VLD1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
             PatFrag LoadOp>
  : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd),
          (ins addrmode6:$Rn, DPR:$src, nohash_imm:$lane),
          IIC_VLD1ln, "vld1", Dt, "\\{$Vd[$lane]\\}, $Rn",
          "$src = $Vd",
          [(set DPR:$Vd, (vector_insert (Ty DPR:$src),
                                         (i32 (LoadOp addrmode6:$Rn)),
                                         imm:$lane))]> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let DecoderMethod = "DecodeVLD1LN";
}
// 32-bit lane loads use the single-register addressing mode so that
// alignment is handled correctly.
class VLD1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
               PatFrag LoadOp>
  : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd),
          (ins addrmode6oneL32:$Rn, DPR:$src, nohash_imm:$lane),
          IIC_VLD1ln, "vld1", Dt, "\\{$Vd[$lane]\\}, $Rn",
          "$src = $Vd",
          [(set DPR:$Vd, (vector_insert (Ty DPR:$src),
                                         (i32 (LoadOp addrmode6oneL32:$Rn)),
                                         imm:$lane))]> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let DecoderMethod = "DecodeVLD1LN";
}
class VLD1QLNPseudo<ValueType Ty, PatFrag LoadOp> : VLDQLNPseudo<IIC_VLD1ln> {
  let Pattern = [(set QPR:$dst, (vector_insert (Ty QPR:$src),
                                               (i32 (LoadOp addrmode6:$addr)),
                                               imm:$lane))];
}

def VLD1LNd8  : VLD1LN<0b0000, {?,?,?,0}, "8", v8i8, extloadi8> {
  let Inst{7-5} = lane{2-0};
}
def VLD1LNd16 : VLD1LN<0b0100, {?,?,0,?}, "16", v4i16, extloadi16> {
  let Inst{7-6} = lane{1-0};
  let Inst{4}   = Rn{4};
}
def VLD1LNd32 : VLD1LN32<0b1000, {?,0,?,?}, "32", v2i32, load> {
  let Inst{7} = lane{0};
  let Inst{5} = Rn{4};
  let Inst{4} = Rn{4};
}

def VLD1LNq8Pseudo  : VLD1QLNPseudo<v16i8, extloadi8>;
def VLD1LNq16Pseudo : VLD1QLNPseudo<v8i16, extloadi16>;
def VLD1LNq32Pseudo : VLD1QLNPseudo<v4i32, load>;

// f32 lane inserts map onto the 32-bit integer lane-load instructions.
def : Pat<(vector_insert (v2f32 DPR:$src),
                         (f32 (load addrmode6:$addr)), imm:$lane),
          (VLD1LNd32 addrmode6:$addr, DPR:$src, imm:$lane)>;
def : Pat<(vector_insert (v4f32 QPR:$src),
                         (f32 (load addrmode6:$addr)), imm:$lane),
          (VLD1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {

// ...with address register writeback:
class VLD1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm,
           DPR:$src, nohash_imm:$lane), IIC_VLD1lnu, "vld1", Dt,
          "\\{$Vd[$lane]\\}, $Rn$Rm",
          "$src = $Vd, $Rn.addr = $wb", []> {
  let DecoderMethod = "DecodeVLD1LN";
}

def VLD1LNd8_UPD  : VLD1LNWB<0b0000, {?,?,?,0}, "8"> {
  let Inst{7-5} = lane{2-0};
}
def VLD1LNd16_UPD : VLD1LNWB<0b0100, {?,?,0,?}, "16"> {
  let Inst{7-6} = lane{1-0};
  let Inst{4}   = Rn{4};
}
def VLD1LNd32_UPD : VLD1LNWB<0b1000, {?,0,?,?}, "32"> {
  let Inst{7} = lane{0};
  let Inst{5} = Rn{4};
  let Inst{4} = Rn{4};
}

def VLD1LNq8Pseudo_UPD  : VLDQLNWBPseudo<IIC_VLD1lnu>;
def VLD1LNq16Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
def VLD1LNq32Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
// VLD2LN : Vector Load (single 2-element structure to one lane)
class VLD2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2),
          (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, nohash_imm:$lane),
          IIC_VLD2ln, "vld2", Dt, "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn",
          "$src1 = $Vd, $src2 = $dst2", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let Inst{4}   = Rn{4};
  let DecoderMethod = "DecodeVLD2LN";
}

def VLD2LNd8 : VLD2LN<0b0001, {?,?,?,?}, "8"> {
  let Inst{7-5} = lane{2-0};
}
def VLD2LNd16 : VLD2LN<0b0101, {?,?,0,?}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD2LNd32 : VLD2LN<0b1001, {?,0,0,?}, "32"> {
  let Inst{7} = lane{0};
}

def VLD2LNd8Pseudo  : VLDQLNPseudo<IIC_VLD2ln>;
def VLD2LNd16Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
def VLD2LNd32Pseudo : VLDQLNPseudo<IIC_VLD2ln>;

// ...with double-spaced registers:
def VLD2LNq16 : VLD2LN<0b0101, {?,?,1,?}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD2LNq32 : VLD2LN<0b1001, {?,1,0,?}, "32"> {
  let Inst{7} = lane{0};
}

def VLD2LNq16Pseudo : VLDQQLNPseudo<IIC_VLD2ln>;
def VLD2LNq32Pseudo : VLDQQLNPseudo<IIC_VLD2ln>;

// ...with address register writeback:
class VLD2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm,
           DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VLD2lnu, "vld2", Dt,
          "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn$Rm",
          "$src1 = $Vd, $src2 = $dst2, $Rn.addr = $wb", []> {
  let Inst{4}   = Rn{4};
  let DecoderMethod = "DecodeVLD2LN";
}

def VLD2LNd8_UPD : VLD2LNWB<0b0001, {?,?,?,?}, "8"> {
  let Inst{7-5} = lane{2-0};
}
def VLD2LNd16_UPD : VLD2LNWB<0b0101, {?,?,0,?}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD2LNd32_UPD : VLD2LNWB<0b1001, {?,0,0,?}, "32"> {
  let Inst{7} = lane{0};
}

def VLD2LNd8Pseudo_UPD  : VLDQLNWBPseudo<IIC_VLD2lnu>;
def VLD2LNd16Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
def VLD2LNd32Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;

def VLD2LNq16_UPD : VLD2LNWB<0b0101, {?,?,1,?}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD2LNq32_UPD : VLD2LNWB<0b1001, {?,1,0,?}, "32"> {
  let Inst{7} = lane{0};
}

def VLD2LNq16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD2lnu>;
def VLD2LNq32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD2lnu>;
// VLD3LN : Vector Load (single 3-element structure to one lane)
class VLD3LN<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
          (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, DPR:$src3,
           nohash_imm:$lane), IIC_VLD3ln, "vld3", Dt,
          "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn",
          "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3", []> {
  let Rm = 0b1111; // no writeback: Rm == PC encoding
  let DecoderMethod = "DecodeVLD3LN";
}

def VLD3LNd8 : VLD3LN<0b0010, {?,?,?,0}, "8"> {
  let Inst{7-5} = lane{2-0};
}
def VLD3LNd16 : VLD3LN<0b0110, {?,?,0,0}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD3LNd32 : VLD3LN<0b1010, {?,0,0,0}, "32"> {
  let Inst{7} = lane{0};
}

def VLD3LNd8Pseudo  : VLDQQLNPseudo<IIC_VLD3ln>;
def VLD3LNd16Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;
def VLD3LNd32Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;

// ...with double-spaced registers:
def VLD3LNq16 : VLD3LN<0b0110, {?,?,1,0}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD3LNq32 : VLD3LN<0b1010, {?,1,0,0}, "32"> {
  let Inst{7} = lane{0};
}

def VLD3LNq16Pseudo : VLDQQQQLNPseudo<IIC_VLD3ln>;
def VLD3LNq32Pseudo : VLDQQQQLNPseudo<IIC_VLD3ln>;
879 // ...with address register writeback:
880 class VLD3LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
881 : NLdStLn<1, 0b10, op11_8, op7_4,
882 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
883 (ins addrmode6:$Rn, am6offset:$Rm,
884 DPR:$src1, DPR:$src2, DPR:$src3, nohash_imm:$lane),
885 IIC_VLD3lnu, "vld3", Dt,
886 "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn$Rm",
887 "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $Rn.addr = $wb",
889 let DecoderMethod = "DecodeVLD3LN";
892 def VLD3LNd8_UPD : VLD3LNWB<0b0010, {?,?,?,0}, "8"> {
893 let Inst{7-5} = lane{2-0};
895 def VLD3LNd16_UPD : VLD3LNWB<0b0110, {?,?,0,0}, "16"> {
896 let Inst{7-6} = lane{1-0};
898 def VLD3LNd32_UPD : VLD3LNWB<0b1010, {?,0,0,0}, "32"> {
899 let Inst{7} = lane{0};
902 def VLD3LNd8Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
903 def VLD3LNd16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
904 def VLD3LNd32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
906 def VLD3LNq16_UPD : VLD3LNWB<0b0110, {?,?,1,0}, "16"> {
907 let Inst{7-6} = lane{1-0};
909 def VLD3LNq32_UPD : VLD3LNWB<0b1010, {?,1,0,0}, "32"> {
910 let Inst{7} = lane{0};
913 def VLD3LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
914 def VLD3LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
916 // VLD4LN : Vector Load (single 4-element structure to one lane)
// Four D-register results; inputs tied to outputs since only one lane per
// register is overwritten by the load.
917 class VLD4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
918 : NLdStLn<1, 0b10, op11_8, op7_4,
919 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
920 (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
921 nohash_imm:$lane), IIC_VLD4ln, "vld4", Dt,
922 "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn",
923 "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []> {
926 let DecoderMethod = "DecodeVLD4LN";
// Lane index encoding: 3 bits for "8", 2 bits for "16", 1 bit for "32".
929 def VLD4LNd8 : VLD4LN<0b0011, {?,?,?,?}, "8"> {
930 let Inst{7-5} = lane{2-0};
932 def VLD4LNd16 : VLD4LN<0b0111, {?,?,0,?}, "16"> {
933 let Inst{7-6} = lane{1-0};
935 def VLD4LNd32 : VLD4LN<0b1011, {?,0,?,?}, "32"> {
936 let Inst{7} = lane{0};
940 def VLD4LNd8Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
941 def VLD4LNd16Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
942 def VLD4LNd32Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
944 // ...with double-spaced registers:
945 def VLD4LNq16 : VLD4LN<0b0111, {?,?,1,?}, "16"> {
946 let Inst{7-6} = lane{1-0};
948 def VLD4LNq32 : VLD4LN<0b1011, {?,1,?,?}, "32"> {
949 let Inst{7} = lane{0};
953 def VLD4LNq16Pseudo : VLDQQQQLNPseudo<IIC_VLD4ln>;
954 def VLD4LNq32Pseudo : VLDQQQQLNPseudo<IIC_VLD4ln>;
956 // ...with address register writeback:
957 class VLD4LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
958 : NLdStLn<1, 0b10, op11_8, op7_4,
959 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
960 (ins addrmode6:$Rn, am6offset:$Rm,
961 DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
962 IIC_VLD4lnu, "vld4", Dt,
963 "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn$Rm",
964 "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4, $Rn.addr = $wb",
// Removed stray space before the semicolon to match every other
// DecoderMethod assignment in this file.
967 let DecoderMethod = "DecodeVLD4LN";
970 def VLD4LNd8_UPD : VLD4LNWB<0b0011, {?,?,?,?}, "8"> {
971 let Inst{7-5} = lane{2-0};
973 def VLD4LNd16_UPD : VLD4LNWB<0b0111, {?,?,0,?}, "16"> {
974 let Inst{7-6} = lane{1-0};
976 def VLD4LNd32_UPD : VLD4LNWB<0b1011, {?,0,?,?}, "32"> {
977 let Inst{7} = lane{0};
981 def VLD4LNd8Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
982 def VLD4LNd16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
983 def VLD4LNd32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
985 def VLD4LNq16_UPD : VLD4LNWB<0b0111, {?,?,1,?}, "16"> {
986 let Inst{7-6} = lane{1-0};
988 def VLD4LNq32_UPD : VLD4LNWB<0b1011, {?,1,?,?}, "32"> {
989 let Inst{7} = lane{0};
993 def VLD4LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
994 def VLD4LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
996 } // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
998 // VLD1DUP : Vector Load (single element to all lanes)
// Has a selection pattern: the loaded scalar is duplicated across all lanes
// of the D register via NEONvdup.
999 class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
1000 : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd), (ins addrmode6dup:$Rn),
1001 IIC_VLD1dup, "vld1", Dt, "\\{$Vd[]\\}, $Rn", "",
1002 [(set DPR:$Vd, (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
1004 let Inst{4} = Rn{4};
1005 let DecoderMethod = "DecodeVLD1DupInstruction";
// Q-register pseudo with the equivalent dup-load pattern.
1007 class VLD1QDUPPseudo<ValueType Ty, PatFrag LoadOp> : VLDQPseudo<IIC_VLD1dup> {
1008 let Pattern = [(set QPR:$dst,
1009 (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$addr)))))];
1012 def VLD1DUPd8 : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8>;
1013 def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16>;
1014 def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load>;
1016 def VLD1DUPq8Pseudo : VLD1QDUPPseudo<v16i8, extloadi8>;
1017 def VLD1DUPq16Pseudo : VLD1QDUPPseudo<v8i16, extloadi16>;
1018 def VLD1DUPq32Pseudo : VLD1QDUPPseudo<v4i32, load>;
// Float splat-loads reuse the 32-bit integer instructions.
1020 def : Pat<(v2f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
1021 (VLD1DUPd32 addrmode6:$addr)>;
1022 def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
1023 (VLD1DUPq32Pseudo addrmode6:$addr)>;
1025 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// Two-register (Q) dup-load; no pattern — selection goes through the pseudos.
1027 class VLD1QDUP<bits<4> op7_4, string Dt>
1028 : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2),
1029 (ins addrmode6dup:$Rn), IIC_VLD1dup,
1030 "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
1032 let Inst{4} = Rn{4};
1033 let DecoderMethod = "DecodeVLD1DupInstruction";
1036 def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8">;
1037 def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16">;
1038 def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32">;
1040 // ...with address register writeback:
1041 class VLD1DUPWB<bits<4> op7_4, string Dt>
1042 : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, GPR:$wb),
1043 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
1044 "vld1", Dt, "\\{$Vd[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
1045 let Inst{4} = Rn{4};
1046 let DecoderMethod = "DecodeVLD1DupInstruction";
1048 class VLD1QDUPWB<bits<4> op7_4, string Dt>
1049 : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
1050 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
1051 "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
1052 let Inst{4} = Rn{4};
1053 let DecoderMethod = "DecodeVLD1DupInstruction";
1056 def VLD1DUPd8_UPD : VLD1DUPWB<{0,0,0,0}, "8">;
1057 def VLD1DUPd16_UPD : VLD1DUPWB<{0,1,0,?}, "16">;
1058 def VLD1DUPd32_UPD : VLD1DUPWB<{1,0,0,?}, "32">;
1060 def VLD1DUPq8_UPD : VLD1QDUPWB<{0,0,1,0}, "8">;
1061 def VLD1DUPq16_UPD : VLD1QDUPWB<{0,1,1,?}, "16">;
1062 def VLD1DUPq32_UPD : VLD1QDUPWB<{1,0,1,?}, "32">;
1064 def VLD1DUPq8Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
1065 def VLD1DUPq16Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
1066 def VLD1DUPq32Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
1068 // VLD2DUP : Vector Load (single 2-element structure to all lanes)
1069 class VLD2DUP<bits<4> op7_4, string Dt>
1070 : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2),
1071 (ins addrmode6dup:$Rn), IIC_VLD2dup,
1072 "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
1074 let Inst{4} = Rn{4};
1075 let DecoderMethod = "DecodeVLD2DupInstruction";
1078 def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8">;
1079 def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16">;
1080 def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32">;
1082 def VLD2DUPd8Pseudo : VLDQPseudo<IIC_VLD2dup>;
1083 def VLD2DUPd16Pseudo : VLDQPseudo<IIC_VLD2dup>;
1084 def VLD2DUPd32Pseudo : VLDQPseudo<IIC_VLD2dup>;
1086 // ...with double-spaced registers (not used for codegen):
// These differ from the d-variants only in op7_4 bit 2 (the register-spacing
// bit in the encoding).
1087 def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8">;
1088 def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16">;
1089 def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32">;
1091 // ...with address register writeback:
1092 class VLD2DUPWB<bits<4> op7_4, string Dt>
1093 : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
1094 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD2dupu,
1095 "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
1096 let Inst{4} = Rn{4};
1097 let DecoderMethod = "DecodeVLD2DupInstruction";
1100 def VLD2DUPd8_UPD : VLD2DUPWB<{0,0,0,0}, "8">;
1101 def VLD2DUPd16_UPD : VLD2DUPWB<{0,1,0,?}, "16">;
1102 def VLD2DUPd32_UPD : VLD2DUPWB<{1,0,0,?}, "32">;
1104 def VLD2DUPd8x2_UPD : VLD2DUPWB<{0,0,1,0}, "8">;
1105 def VLD2DUPd16x2_UPD : VLD2DUPWB<{0,1,1,?}, "16">;
1106 def VLD2DUPd32x2_UPD : VLD2DUPWB<{1,0,1,?}, "32">;
1108 def VLD2DUPd8Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
1109 def VLD2DUPd16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
1110 def VLD2DUPd32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
1112 // VLD3DUP : Vector Load (single 3-element structure to all lanes)
// NOTE(review): unlike VLD2DUP/VLD4DUP, no "let Inst{4} = Rn{4};" is visible
// in this class body — confirm against the original file whether that line
// was lost in this extract.
1113 class VLD3DUP<bits<4> op7_4, string Dt>
1114 : NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
1115 (ins addrmode6dup:$Rn), IIC_VLD3dup,
1116 "vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn", "", []> {
1119 let DecoderMethod = "DecodeVLD3DupInstruction";
1122 def VLD3DUPd8 : VLD3DUP<{0,0,0,?}, "8">;
1123 def VLD3DUPd16 : VLD3DUP<{0,1,0,?}, "16">;
1124 def VLD3DUPd32 : VLD3DUP<{1,0,0,?}, "32">;
1126 def VLD3DUPd8Pseudo : VLDQQPseudo<IIC_VLD3dup>;
1127 def VLD3DUPd16Pseudo : VLDQQPseudo<IIC_VLD3dup>;
1128 def VLD3DUPd32Pseudo : VLDQQPseudo<IIC_VLD3dup>;
1130 // ...with double-spaced registers (not used for codegen):
1131 def VLD3DUPd8x2 : VLD3DUP<{0,0,1,?}, "8">;
1132 def VLD3DUPd16x2 : VLD3DUP<{0,1,1,?}, "16">;
1133 def VLD3DUPd32x2 : VLD3DUP<{1,0,1,?}, "32">;
1135 // ...with address register writeback:
1136 class VLD3DUPWB<bits<4> op7_4, string Dt>
1137 : NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
1138 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD3dupu,
1139 "vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn$Rm",
1140 "$Rn.addr = $wb", []> {
1142 let DecoderMethod = "DecodeVLD3DupInstruction";
1145 def VLD3DUPd8_UPD : VLD3DUPWB<{0,0,0,0}, "8">;
1146 def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16">;
1147 def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32">;
1149 def VLD3DUPd8x2_UPD : VLD3DUPWB<{0,0,1,0}, "8">;
1150 def VLD3DUPd16x2_UPD : VLD3DUPWB<{0,1,1,?}, "16">;
1151 def VLD3DUPd32x2_UPD : VLD3DUPWB<{1,0,1,?}, "32">;
1153 def VLD3DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
1154 def VLD3DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
1155 def VLD3DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
1157 // VLD4DUP : Vector Load (single 4-element structure to all lanes)
1158 class VLD4DUP<bits<4> op7_4, string Dt>
1159 : NLdSt<1, 0b10, 0b1111, op7_4,
1160 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
1161 (ins addrmode6dup:$Rn), IIC_VLD4dup,
1162 "vld4", Dt, "\\{$Vd[], $dst2[], $dst3[], $dst4[]\\}, $Rn", "", []> {
1164 let Inst{4} = Rn{4};
1165 let DecoderMethod = "DecodeVLD4DupInstruction";
1168 def VLD4DUPd8 : VLD4DUP<{0,0,0,?}, "8">;
1169 def VLD4DUPd16 : VLD4DUP<{0,1,0,?}, "16">;
// 32-bit variants additionally route an alignment/size bit from the
// addressing-mode operand into Inst{6}.
1170 def VLD4DUPd32 : VLD4DUP<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }
1172 def VLD4DUPd8Pseudo : VLDQQPseudo<IIC_VLD4dup>;
1173 def VLD4DUPd16Pseudo : VLDQQPseudo<IIC_VLD4dup>;
1174 def VLD4DUPd32Pseudo : VLDQQPseudo<IIC_VLD4dup>;
1176 // ...with double-spaced registers (not used for codegen):
1177 def VLD4DUPd8x2 : VLD4DUP<{0,0,1,?}, "8">;
1178 def VLD4DUPd16x2 : VLD4DUP<{0,1,1,?}, "16">;
1179 def VLD4DUPd32x2 : VLD4DUP<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
1181 // ...with address register writeback:
1182 class VLD4DUPWB<bits<4> op7_4, string Dt>
1183 : NLdSt<1, 0b10, 0b1111, op7_4,
1184 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
1185 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD4dupu,
1186 "vld4", Dt, "\\{$Vd[], $dst2[], $dst3[], $dst4[]\\}, $Rn$Rm",
1187 "$Rn.addr = $wb", []> {
1188 let Inst{4} = Rn{4};
1189 let DecoderMethod = "DecodeVLD4DupInstruction";
1192 def VLD4DUPd8_UPD : VLD4DUPWB<{0,0,0,0}, "8">;
1193 def VLD4DUPd16_UPD : VLD4DUPWB<{0,1,0,?}, "16">;
1194 def VLD4DUPd32_UPD : VLD4DUPWB<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }
1196 def VLD4DUPd8x2_UPD : VLD4DUPWB<{0,0,1,0}, "8">;
1197 def VLD4DUPd16x2_UPD : VLD4DUPWB<{0,1,1,?}, "16">;
1198 def VLD4DUPd32x2_UPD : VLD4DUPWB<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
1200 def VLD4DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
1201 def VLD4DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
1202 def VLD4DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
1204 } // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
// All store definitions below share these flags; the block is closed much
// later with "} // mayStore = 1, ...".
1206 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
1208 // Classes for VST* pseudo-instructions with multi-register operands.
1209 // These are expanded to real instructions after register allocation.
// Naming scheme: Q/QQ/QQQQ = size of the multi-register source operand;
// WB = writeback (extra GPR:$wb result tied to $addr.addr);
// fixed/register = post-increment by fixed stride vs. by a register.
1210 class VSTQPseudo<InstrItinClass itin>
1211 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src), itin, "">;
1212 class VSTQWBPseudo<InstrItinClass itin>
1213 : PseudoNLdSt<(outs GPR:$wb),
1214 (ins addrmode6:$addr, am6offset:$offset, QPR:$src), itin,
1215 "$addr.addr = $wb">;
1216 class VSTQWBfixedPseudo<InstrItinClass itin>
1217 : PseudoNLdSt<(outs GPR:$wb),
1218 (ins addrmode6:$addr, QPR:$src), itin,
1219 "$addr.addr = $wb">;
1220 class VSTQWBregisterPseudo<InstrItinClass itin>
1221 : PseudoNLdSt<(outs GPR:$wb),
1222 (ins addrmode6:$addr, rGPR:$offset, QPR:$src), itin,
1223 "$addr.addr = $wb">;
1224 class VSTQQPseudo<InstrItinClass itin>
1225 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src), itin, "">;
1226 class VSTQQWBPseudo<InstrItinClass itin>
1227 : PseudoNLdSt<(outs GPR:$wb),
1228 (ins addrmode6:$addr, am6offset:$offset, QQPR:$src), itin,
1229 "$addr.addr = $wb">;
1230 class VSTQQQQPseudo<InstrItinClass itin>
1231 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src), itin, "">;
1232 class VSTQQQQWBPseudo<InstrItinClass itin>
1233 : PseudoNLdSt<(outs GPR:$wb),
1234 (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
1235 "$addr.addr = $wb">;
1237 // VST1 : Vector Store (multiple single elements)
1238 class VST1D<bits<4> op7_4, string Dt>
1239 : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$Rn, VecListOneD:$Vd),
1240 IIC_VST1, "vst1", Dt, "$Vd, $Rn", "", []> {
1242 let Inst{4} = Rn{4};
1243 let DecoderMethod = "DecodeVSTInstruction";
1245 class VST1Q<bits<4> op7_4, string Dt>
1246 : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$Rn, VecListTwoD:$Vd),
1247 IIC_VST1x2, "vst1", Dt, "$Vd, $Rn", "", []> {
1249 let Inst{5-4} = Rn{5-4};
1250 let DecoderMethod = "DecodeVSTInstruction";
1253 def VST1d8 : VST1D<{0,0,0,?}, "8">;
1254 def VST1d16 : VST1D<{0,1,0,?}, "16">;
1255 def VST1d32 : VST1D<{1,0,0,?}, "32">;
1256 def VST1d64 : VST1D<{1,1,0,?}, "64">;
1258 def VST1q8 : VST1Q<{0,0,?,?}, "8">;
1259 def VST1q16 : VST1Q<{0,1,?,?}, "16">;
1260 def VST1q32 : VST1Q<{1,0,?,?}, "32">;
1261 def VST1q64 : VST1Q<{1,1,?,?}, "64">;
1263 def VST1q8Pseudo : VSTQPseudo<IIC_VST1x2>;
1264 def VST1q16Pseudo : VSTQPseudo<IIC_VST1x2>;
1265 def VST1q32Pseudo : VSTQPseudo<IIC_VST1x2>;
1266 def VST1q64Pseudo : VSTQPseudo<IIC_VST1x2>;
1268 // ...with address register writeback:
// Each multiclass emits a _fixed variant ("$Rn!" syntax, Rm forced to 0b1101)
// and a _register variant (post-increment by rGPR:$Rm).
1269 multiclass VST1DWB<bits<4> op7_4, string Dt> {
1270 def _fixed : NLdSt<0,0b00, 0b0111,op7_4, (outs GPR:$wb),
1271 (ins addrmode6:$Rn, VecListOneD:$Vd), IIC_VLD1u,
1272 "vst1", Dt, "$Vd, $Rn!",
1273 "$Rn.addr = $wb", []> {
1274 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
1275 let Inst{4} = Rn{4};
1276 let DecoderMethod = "DecodeVSTInstruction";
1277 let AsmMatchConverter = "cvtVSTwbFixed";
1279 def _register : NLdSt<0,0b00,0b0111,op7_4, (outs GPR:$wb),
1280 (ins addrmode6:$Rn, rGPR:$Rm, VecListOneD:$Vd),
1282 "vst1", Dt, "$Vd, $Rn, $Rm",
1283 "$Rn.addr = $wb", []> {
1284 let Inst{4} = Rn{4};
1285 let DecoderMethod = "DecodeVSTInstruction";
1286 let AsmMatchConverter = "cvtVSTwbRegister";
1289 multiclass VST1QWB<bits<4> op7_4, string Dt> {
1290 def _fixed : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
1291 (ins addrmode6:$Rn, VecListTwoD:$Vd), IIC_VLD1x2u,
1292 "vst1", Dt, "$Vd, $Rn!",
1293 "$Rn.addr = $wb", []> {
1294 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
1295 let Inst{5-4} = Rn{5-4};
1296 let DecoderMethod = "DecodeVSTInstruction";
1297 let AsmMatchConverter = "cvtVSTwbFixed";
1299 def _register : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
1300 (ins addrmode6:$Rn, rGPR:$Rm, VecListTwoD:$Vd),
1302 "vst1", Dt, "$Vd, $Rn, $Rm",
1303 "$Rn.addr = $wb", []> {
1304 let Inst{5-4} = Rn{5-4};
1305 let DecoderMethod = "DecodeVSTInstruction";
1306 let AsmMatchConverter = "cvtVSTwbRegister";
1310 defm VST1d8wb : VST1DWB<{0,0,0,?}, "8">;
1311 defm VST1d16wb : VST1DWB<{0,1,0,?}, "16">;
1312 defm VST1d32wb : VST1DWB<{1,0,0,?}, "32">;
1313 defm VST1d64wb : VST1DWB<{1,1,0,?}, "64">;
1315 defm VST1q8wb : VST1QWB<{0,0,?,?}, "8">;
1316 defm VST1q16wb : VST1QWB<{0,1,?,?}, "16">;
1317 defm VST1q32wb : VST1QWB<{1,0,?,?}, "32">;
1318 defm VST1q64wb : VST1QWB<{1,1,?,?}, "64">;
1320 def VST1q8PseudoWB_fixed : VSTQWBfixedPseudo<IIC_VST1x2u>;
1321 def VST1q16PseudoWB_fixed : VSTQWBfixedPseudo<IIC_VST1x2u>;
1322 def VST1q32PseudoWB_fixed : VSTQWBfixedPseudo<IIC_VST1x2u>;
1323 def VST1q64PseudoWB_fixed : VSTQWBfixedPseudo<IIC_VST1x2u>;
1324 def VST1q8PseudoWB_register : VSTQWBregisterPseudo<IIC_VST1x2u>;
1325 def VST1q16PseudoWB_register : VSTQWBregisterPseudo<IIC_VST1x2u>;
1326 def VST1q32PseudoWB_register : VSTQWBregisterPseudo<IIC_VST1x2u>;
1327 def VST1q64PseudoWB_register : VSTQWBregisterPseudo<IIC_VST1x2u>;
1329 // ...with 3 registers
// VST1 storing a list of three consecutive D registers.
1330 class VST1D3<bits<4> op7_4, string Dt>
1331 : NLdSt<0, 0b00, 0b0110, op7_4, (outs),
1332 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3),
1333 IIC_VST1x3, "vst1", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
1335 let Inst{4} = Rn{4};
1336 let DecoderMethod = "DecodeVSTInstruction";
1338 class VST1D3WB<bits<4> op7_4, string Dt>
1339 : NLdSt<0, 0b00, 0b0110, op7_4, (outs GPR:$wb),
1340 (ins addrmode6:$Rn, am6offset:$Rm,
1341 DPR:$Vd, DPR:$src2, DPR:$src3),
1342 IIC_VST1x3u, "vst1", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
1343 "$Rn.addr = $wb", []> {
1344 let Inst{4} = Rn{4};
1345 let DecoderMethod = "DecodeVSTInstruction";
1348 def VST1d8T : VST1D3<{0,0,0,?}, "8">;
1349 def VST1d16T : VST1D3<{0,1,0,?}, "16">;
1350 def VST1d32T : VST1D3<{1,0,0,?}, "32">;
1351 def VST1d64T : VST1D3<{1,1,0,?}, "64">;
1353 def VST1d8T_UPD : VST1D3WB<{0,0,0,?}, "8">;
1354 def VST1d16T_UPD : VST1D3WB<{0,1,0,?}, "16">;
1355 def VST1d32T_UPD : VST1D3WB<{1,0,0,?}, "32">;
1356 def VST1d64T_UPD : VST1D3WB<{1,1,0,?}, "64">;
1358 def VST1d64TPseudo : VSTQQPseudo<IIC_VST1x3>;
1359 def VST1d64TPseudo_UPD : VSTQQWBPseudo<IIC_VST1x3u>;
1361 // ...with 4 registers
// VST1 storing a list of four consecutive D registers.
1362 class VST1D4<bits<4> op7_4, string Dt>
1363 : NLdSt<0, 0b00, 0b0010, op7_4, (outs),
1364 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
1365 IIC_VST1x4, "vst1", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn", "",
1368 let Inst{5-4} = Rn{5-4};
1369 let DecoderMethod = "DecodeVSTInstruction";
1371 class VST1D4WB<bits<4> op7_4, string Dt>
1372 : NLdSt<0, 0b00, 0b0010, op7_4, (outs GPR:$wb),
1373 (ins addrmode6:$Rn, am6offset:$Rm,
1374 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST1x4u,
1375 "vst1", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
1376 "$Rn.addr = $wb", []> {
1377 let Inst{5-4} = Rn{5-4};
1378 let DecoderMethod = "DecodeVSTInstruction";
1381 def VST1d8Q : VST1D4<{0,0,?,?}, "8">;
1382 def VST1d16Q : VST1D4<{0,1,?,?}, "16">;
1383 def VST1d32Q : VST1D4<{1,0,?,?}, "32">;
1384 def VST1d64Q : VST1D4<{1,1,?,?}, "64">;
1386 def VST1d8Q_UPD : VST1D4WB<{0,0,?,?}, "8">;
1387 def VST1d16Q_UPD : VST1D4WB<{0,1,?,?}, "16">;
1388 def VST1d32Q_UPD : VST1D4WB<{1,0,?,?}, "32">;
1389 def VST1d64Q_UPD : VST1D4WB<{1,1,?,?}, "64">;
1391 def VST1d64QPseudo : VSTQQPseudo<IIC_VST1x4>;
1392 def VST1d64QPseudo_UPD : VSTQQWBPseudo<IIC_VST1x4u>;
1394 // VST2 : Vector Store (multiple 2-element structures)
1395 class VST2D<bits<4> op11_8, bits<4> op7_4, string Dt>
1396 : NLdSt<0, 0b00, op11_8, op7_4, (outs),
1397 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2),
1398 IIC_VST2, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
1400 let Inst{5-4} = Rn{5-4};
1401 let DecoderMethod = "DecodeVSTInstruction";
1403 class VST2Q<bits<4> op7_4, string Dt>
1404 : NLdSt<0, 0b00, 0b0011, op7_4, (outs),
1405 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
1406 IIC_VST2x2, "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
1409 let Inst{5-4} = Rn{5-4};
1410 let DecoderMethod = "DecodeVSTInstruction";
// op11_8 = 0b1000 selects the single-spaced d-register form.
1413 def VST2d8 : VST2D<0b1000, {0,0,?,?}, "8">;
1414 def VST2d16 : VST2D<0b1000, {0,1,?,?}, "16">;
1415 def VST2d32 : VST2D<0b1000, {1,0,?,?}, "32">;
1417 def VST2q8 : VST2Q<{0,0,?,?}, "8">;
1418 def VST2q16 : VST2Q<{0,1,?,?}, "16">;
1419 def VST2q32 : VST2Q<{1,0,?,?}, "32">;
1421 def VST2d8Pseudo : VSTQPseudo<IIC_VST2>;
1422 def VST2d16Pseudo : VSTQPseudo<IIC_VST2>;
1423 def VST2d32Pseudo : VSTQPseudo<IIC_VST2>;
1425 def VST2q8Pseudo : VSTQQPseudo<IIC_VST2x2>;
1426 def VST2q16Pseudo : VSTQQPseudo<IIC_VST2x2>;
1427 def VST2q32Pseudo : VSTQQPseudo<IIC_VST2x2>;
1429 // ...with address register writeback:
1430 class VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1431 : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
1432 (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd, DPR:$src2),
1433 IIC_VST2u, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn$Rm",
1434 "$Rn.addr = $wb", []> {
1435 let Inst{5-4} = Rn{5-4};
1436 let DecoderMethod = "DecodeVSTInstruction";
1438 class VST2QWB<bits<4> op7_4, string Dt>
1439 : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
1440 (ins addrmode6:$Rn, am6offset:$Rm,
1441 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST2x2u,
1442 "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
1443 "$Rn.addr = $wb", []> {
1444 let Inst{5-4} = Rn{5-4};
1445 let DecoderMethod = "DecodeVSTInstruction";
1448 def VST2d8_UPD : VST2DWB<0b1000, {0,0,?,?}, "8">;
1449 def VST2d16_UPD : VST2DWB<0b1000, {0,1,?,?}, "16">;
1450 def VST2d32_UPD : VST2DWB<0b1000, {1,0,?,?}, "32">;
1452 def VST2q8_UPD : VST2QWB<{0,0,?,?}, "8">;
1453 def VST2q16_UPD : VST2QWB<{0,1,?,?}, "16">;
1454 def VST2q32_UPD : VST2QWB<{1,0,?,?}, "32">;
1456 def VST2d8Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
1457 def VST2d16Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
1458 def VST2d32Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
1460 def VST2q8Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
1461 def VST2q16Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
1462 def VST2q32Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
1464 // ...with double-spaced registers
// op11_8 = 0b1001 selects the double-spaced form (same class, different
// opcode nibble).
1465 def VST2b8 : VST2D<0b1001, {0,0,?,?}, "8">;
1466 def VST2b16 : VST2D<0b1001, {0,1,?,?}, "16">;
1467 def VST2b32 : VST2D<0b1001, {1,0,?,?}, "32">;
1468 def VST2b8_UPD : VST2DWB<0b1001, {0,0,?,?}, "8">;
1469 def VST2b16_UPD : VST2DWB<0b1001, {0,1,?,?}, "16">;
1470 def VST2b32_UPD : VST2DWB<0b1001, {1,0,?,?}, "32">;
1472 // VST3 : Vector Store (multiple 3-element structures)
1473 class VST3D<bits<4> op11_8, bits<4> op7_4, string Dt>
1474 : NLdSt<0, 0b00, op11_8, op7_4, (outs),
1475 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3), IIC_VST3,
1476 "vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
1478 let Inst{4} = Rn{4};
1479 let DecoderMethod = "DecodeVSTInstruction";
// op11_8 = 0b0100 selects the single-spaced form.
1482 def VST3d8 : VST3D<0b0100, {0,0,0,?}, "8">;
1483 def VST3d16 : VST3D<0b0100, {0,1,0,?}, "16">;
1484 def VST3d32 : VST3D<0b0100, {1,0,0,?}, "32">;
1486 def VST3d8Pseudo : VSTQQPseudo<IIC_VST3>;
1487 def VST3d16Pseudo : VSTQQPseudo<IIC_VST3>;
1488 def VST3d32Pseudo : VSTQQPseudo<IIC_VST3>;
1490 // ...with address register writeback:
1491 class VST3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1492 : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
1493 (ins addrmode6:$Rn, am6offset:$Rm,
1494 DPR:$Vd, DPR:$src2, DPR:$src3), IIC_VST3u,
1495 "vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
1496 "$Rn.addr = $wb", []> {
1497 let Inst{4} = Rn{4};
1498 let DecoderMethod = "DecodeVSTInstruction";
1501 def VST3d8_UPD : VST3DWB<0b0100, {0,0,0,?}, "8">;
1502 def VST3d16_UPD : VST3DWB<0b0100, {0,1,0,?}, "16">;
1503 def VST3d32_UPD : VST3DWB<0b0100, {1,0,0,?}, "32">;
1505 def VST3d8Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
1506 def VST3d16Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
1507 def VST3d32Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
1509 // ...with double-spaced registers:
// op11_8 = 0b0101 selects the double-spaced form.
1510 def VST3q8 : VST3D<0b0101, {0,0,0,?}, "8">;
1511 def VST3q16 : VST3D<0b0101, {0,1,0,?}, "16">;
1512 def VST3q32 : VST3D<0b0101, {1,0,0,?}, "32">;
1513 def VST3q8_UPD : VST3DWB<0b0101, {0,0,0,?}, "8">;
1514 def VST3q16_UPD : VST3DWB<0b0101, {0,1,0,?}, "16">;
1515 def VST3q32_UPD : VST3DWB<0b0101, {1,0,0,?}, "32">;
1517 def VST3q8Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1518 def VST3q16Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1519 def VST3q32Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1521 // ...alternate versions to be allocated odd register numbers:
1522 def VST3q8oddPseudo : VSTQQQQPseudo<IIC_VST3>;
1523 def VST3q16oddPseudo : VSTQQQQPseudo<IIC_VST3>;
1524 def VST3q32oddPseudo : VSTQQQQPseudo<IIC_VST3>;
1526 def VST3q8oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1527 def VST3q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1528 def VST3q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1530 // VST4 : Vector Store (multiple 4-element structures)
1531 class VST4D<bits<4> op11_8, bits<4> op7_4, string Dt>
1532 : NLdSt<0, 0b00, op11_8, op7_4, (outs),
1533 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
1534 IIC_VST4, "vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
1537 let Inst{5-4} = Rn{5-4};
1538 let DecoderMethod = "DecodeVSTInstruction";
// op11_8 = 0b0000 selects the single-spaced form.
1541 def VST4d8 : VST4D<0b0000, {0,0,?,?}, "8">;
1542 def VST4d16 : VST4D<0b0000, {0,1,?,?}, "16">;
1543 def VST4d32 : VST4D<0b0000, {1,0,?,?}, "32">;
1545 def VST4d8Pseudo : VSTQQPseudo<IIC_VST4>;
1546 def VST4d16Pseudo : VSTQQPseudo<IIC_VST4>;
1547 def VST4d32Pseudo : VSTQQPseudo<IIC_VST4>;
1549 // ...with address register writeback:
1550 class VST4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1551 : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
1552 (ins addrmode6:$Rn, am6offset:$Rm,
1553 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST4u,
1554 "vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
1555 "$Rn.addr = $wb", []> {
1556 let Inst{5-4} = Rn{5-4};
1557 let DecoderMethod = "DecodeVSTInstruction";
1560 def VST4d8_UPD : VST4DWB<0b0000, {0,0,?,?}, "8">;
1561 def VST4d16_UPD : VST4DWB<0b0000, {0,1,?,?}, "16">;
1562 def VST4d32_UPD : VST4DWB<0b0000, {1,0,?,?}, "32">;
1564 def VST4d8Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
1565 def VST4d16Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
1566 def VST4d32Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
1568 // ...with double-spaced registers:
// op11_8 = 0b0001 selects the double-spaced form.
1569 def VST4q8 : VST4D<0b0001, {0,0,?,?}, "8">;
1570 def VST4q16 : VST4D<0b0001, {0,1,?,?}, "16">;
1571 def VST4q32 : VST4D<0b0001, {1,0,?,?}, "32">;
1572 def VST4q8_UPD : VST4DWB<0b0001, {0,0,?,?}, "8">;
1573 def VST4q16_UPD : VST4DWB<0b0001, {0,1,?,?}, "16">;
1574 def VST4q32_UPD : VST4DWB<0b0001, {1,0,?,?}, "32">;
1576 def VST4q8Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1577 def VST4q16Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1578 def VST4q32Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1580 // ...alternate versions to be allocated odd register numbers:
1581 def VST4q8oddPseudo : VSTQQQQPseudo<IIC_VST4>;
1582 def VST4q16oddPseudo : VSTQQQQPseudo<IIC_VST4>;
1583 def VST4q32oddPseudo : VSTQQQQPseudo<IIC_VST4>;
1585 def VST4q8oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1586 def VST4q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1587 def VST4q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1589 } // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
1591 // Classes for VST*LN pseudo-instructions with multi-register operands.
1592 // These are expanded to real instructions after register allocation.
// Same naming scheme as the VSTQ* pseudos above, plus a nohash_imm:$lane
// operand selecting the lane to store.
1593 class VSTQLNPseudo<InstrItinClass itin>
1594 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src, nohash_imm:$lane),
1596 class VSTQLNWBPseudo<InstrItinClass itin>
1597 : PseudoNLdSt<(outs GPR:$wb),
1598 (ins addrmode6:$addr, am6offset:$offset, QPR:$src,
1599 nohash_imm:$lane), itin, "$addr.addr = $wb">;
1600 class VSTQQLNPseudo<InstrItinClass itin>
1601 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src, nohash_imm:$lane),
1603 class VSTQQLNWBPseudo<InstrItinClass itin>
1604 : PseudoNLdSt<(outs GPR:$wb),
1605 (ins addrmode6:$addr, am6offset:$offset, QQPR:$src,
1606 nohash_imm:$lane), itin, "$addr.addr = $wb">;
1607 class VSTQQQQLNPseudo<InstrItinClass itin>
1608 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src, nohash_imm:$lane),
1610 class VSTQQQQLNWBPseudo<InstrItinClass itin>
1611 : PseudoNLdSt<(outs GPR:$wb),
1612 (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src,
1613 nohash_imm:$lane), itin, "$addr.addr = $wb">;
1615 // VST1LN : Vector Store (single element from one lane)
// Has a selection pattern: ExtractOp pulls the lane out of the vector and
// StoreOp writes it to memory.
1616 class VST1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
1617 PatFrag StoreOp, SDNode ExtractOp>
1618 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1619 (ins addrmode6:$Rn, DPR:$Vd, nohash_imm:$lane),
1620 IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
1621 [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6:$Rn)]> {
1623 let DecoderMethod = "DecodeVST1LN";
// 32-bit variant uses the addrmode6oneL32 addressing mode instead of
// addrmode6.
1625 class VST1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
1626 PatFrag StoreOp, SDNode ExtractOp>
1627 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1628 (ins addrmode6oneL32:$Rn, DPR:$Vd, nohash_imm:$lane),
1629 IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
1630 [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6oneL32:$Rn)]>{
1632 let DecoderMethod = "DecodeVST1LN";
1634 class VST1QLNPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
1635 : VSTQLNPseudo<IIC_VST1ln> {
1636 let Pattern = [(StoreOp (ExtractOp (Ty QPR:$src), imm:$lane),
// Lane index encoding per element size, as in the VLD*LN defs; the 16/32-bit
// forms also route address bits into Inst{4} / Inst{5-4}.
1640 def VST1LNd8 : VST1LN<0b0000, {?,?,?,0}, "8", v8i8, truncstorei8,
1642 let Inst{7-5} = lane{2-0};
1644 def VST1LNd16 : VST1LN<0b0100, {?,?,0,?}, "16", v4i16, truncstorei16,
1646 let Inst{7-6} = lane{1-0};
1647 let Inst{4} = Rn{5};
1650 def VST1LNd32 : VST1LN32<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt> {
1651 let Inst{7} = lane{0};
1652 let Inst{5-4} = Rn{5-4};
1655 def VST1LNq8Pseudo : VST1QLNPseudo<v16i8, truncstorei8, NEONvgetlaneu>;
1656 def VST1LNq16Pseudo : VST1QLNPseudo<v8i16, truncstorei16, NEONvgetlaneu>;
1657 def VST1LNq32Pseudo : VST1QLNPseudo<v4i32, store, extractelt>;
// Float lane stores reuse the 32-bit integer instructions.
1659 def : Pat<(store (extractelt (v2f32 DPR:$src), imm:$lane), addrmode6:$addr),
1660 (VST1LNd32 addrmode6:$addr, DPR:$src, imm:$lane)>;
1661 def : Pat<(store (extractelt (v4f32 QPR:$src), imm:$lane), addrmode6:$addr),
1662 (VST1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
1664 // ...with address register writeback:
// Post-indexed pattern: the store produces the updated base address in
// GPR:$wb.
1665 class VST1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
1666 PatFrag StoreOp, SDNode ExtractOp>
1667 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1668 (ins addrmode6:$Rn, am6offset:$Rm,
1669 DPR:$Vd, nohash_imm:$lane), IIC_VST1lnu, "vst1", Dt,
1670 "\\{$Vd[$lane]\\}, $Rn$Rm",
1672 [(set GPR:$wb, (StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane),
1673 addrmode6:$Rn, am6offset:$Rm))]> {
1674 let DecoderMethod = "DecodeVST1LN";
1676 class VST1QLNWBPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
1677 : VSTQLNWBPseudo<IIC_VST1lnu> {
1678 let Pattern = [(set GPR:$wb, (StoreOp (ExtractOp (Ty QPR:$src), imm:$lane),
1679 addrmode6:$addr, am6offset:$offset))];
1682 def VST1LNd8_UPD : VST1LNWB<0b0000, {?,?,?,0}, "8", v8i8, post_truncsti8,
1684 let Inst{7-5} = lane{2-0};
1686 def VST1LNd16_UPD : VST1LNWB<0b0100, {?,?,0,?}, "16", v4i16, post_truncsti16,
1688 let Inst{7-6} = lane{1-0};
1689 let Inst{4} = Rn{5};
1691 def VST1LNd32_UPD : VST1LNWB<0b1000, {?,0,?,?}, "32", v2i32, post_store,
1693 let Inst{7} = lane{0};
1694 let Inst{5-4} = Rn{5-4};
1697 def VST1LNq8Pseudo_UPD : VST1QLNWBPseudo<v16i8, post_truncsti8, NEONvgetlaneu>;
1698 def VST1LNq16Pseudo_UPD : VST1QLNWBPseudo<v8i16, post_truncsti16,NEONvgetlaneu>;
1699 def VST1LNq32Pseudo_UPD : VST1QLNWBPseudo<v4i32, post_store, extractelt>;
// The VST2/3/4 lane-store definitions below carry no patterns, so mark them
// store-only with extra source reg-alloc constraints for the register lists.
1701 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
1703 // VST2LN : Vector Store (single 2-element structure from one lane)
// Stores lane $lane of two D registers ($Vd, $src2) as a 2-element structure.
1704 class VST2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1705 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1706 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, nohash_imm:$lane),
1707 IIC_VST2ln, "vst2", Dt, "\\{$Vd[$lane], $src2[$lane]\\}, $Rn",
1710 let Inst{4} = Rn{4};
1711 let DecoderMethod = "DecodeVST2LN";
// Lane index encoding per element size, as for VST1LN.
1714 def VST2LNd8 : VST2LN<0b0001, {?,?,?,?}, "8"> {
1715 let Inst{7-5} = lane{2-0};
1717 def VST2LNd16 : VST2LN<0b0101, {?,?,0,?}, "16"> {
1718 let Inst{7-6} = lane{1-0};
1720 def VST2LNd32 : VST2LN<0b1001, {?,0,0,?}, "32"> {
1721 let Inst{7} = lane{0};
1724 def VST2LNd8Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1725 def VST2LNd16Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1726 def VST2LNd32Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1728 // ...with double-spaced registers:
// 16-/32-bit only: 8-bit elements cannot use double-spaced registers.
1729 def VST2LNq16 : VST2LN<0b0101, {?,?,1,?}, "16"> {
1730 let Inst{7-6} = lane{1-0};
1731 let Inst{4} = Rn{4};
1733 def VST2LNq32 : VST2LN<0b1001, {?,1,0,?}, "32"> {
1734 let Inst{7} = lane{0};
1735 let Inst{4} = Rn{4};
1738 def VST2LNq16Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
1739 def VST2LNq32Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
1741 // ...with address register writeback:
// Note: unlike VST1LNWB this class names its operands $addr/$offset/$src1
// rather than $Rn/$Rm/$Vd; the tie "$addr.addr = $wb" expresses writeback.
1742 class VST2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1743 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1744 (ins addrmode6:$addr, am6offset:$offset,
1745 DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VST2lnu, "vst2", Dt,
1746 "\\{$src1[$lane], $src2[$lane]\\}, $addr$offset",
1747 "$addr.addr = $wb", []> {
1748 let Inst{4} = Rn{4};
1749 let DecoderMethod = "DecodeVST2LN";
1752 def VST2LNd8_UPD : VST2LNWB<0b0001, {?,?,?,?}, "8"> {
1753 let Inst{7-5} = lane{2-0};
1755 def VST2LNd16_UPD : VST2LNWB<0b0101, {?,?,0,?}, "16"> {
1756 let Inst{7-6} = lane{1-0};
1758 def VST2LNd32_UPD : VST2LNWB<0b1001, {?,0,0,?}, "32"> {
1759 let Inst{7} = lane{0};
1762 def VST2LNd8Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
1763 def VST2LNd16Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
1764 def VST2LNd32Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
1766 def VST2LNq16_UPD : VST2LNWB<0b0101, {?,?,1,?}, "16"> {
1767 let Inst{7-6} = lane{1-0};
1769 def VST2LNq32_UPD : VST2LNWB<0b1001, {?,1,0,?}, "32"> {
1770 let Inst{7} = lane{0};
1773 def VST2LNq16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST2lnu>;
1774 def VST2LNq32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST2lnu>;
1776 // VST3LN : Vector Store (single 3-element structure from one lane)
// Stores lane $lane of three D registers as a 3-element structure.
// NOTE(review): extract is elided between fused original line numbers;
// tokens kept byte-identical.
1777 class VST3LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1778 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1779 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3,
1780 nohash_imm:$lane), IIC_VST3ln, "vst3", Dt,
1781 "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn", "", []> {
1783 let DecoderMethod = "DecodeVST3LN";
// Lane index encoding per element size, as for VST1LN/VST2LN.
1786 def VST3LNd8 : VST3LN<0b0010, {?,?,?,0}, "8"> {
1787 let Inst{7-5} = lane{2-0};
1789 def VST3LNd16 : VST3LN<0b0110, {?,?,0,0}, "16"> {
1790 let Inst{7-6} = lane{1-0};
1792 def VST3LNd32 : VST3LN<0b1010, {?,0,0,0}, "32"> {
1793 let Inst{7} = lane{0};
1796 def VST3LNd8Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1797 def VST3LNd16Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1798 def VST3LNd32Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1800 // ...with double-spaced registers:
1801 def VST3LNq16 : VST3LN<0b0110, {?,?,1,0}, "16"> {
1802 let Inst{7-6} = lane{1-0};
1804 def VST3LNq32 : VST3LN<0b1010, {?,1,0,0}, "32"> {
1805 let Inst{7} = lane{0};
1808 def VST3LNq16Pseudo : VSTQQQQLNPseudo<IIC_VST3ln>;
1809 def VST3LNq32Pseudo : VSTQQQQLNPseudo<IIC_VST3ln>;
1811 // ...with address register writeback:
// Uses $Rn/$Rm operand names with the "$Rn.addr = $wb" writeback tie.
1812 class VST3LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1813 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1814 (ins addrmode6:$Rn, am6offset:$Rm,
1815 DPR:$Vd, DPR:$src2, DPR:$src3, nohash_imm:$lane),
1816 IIC_VST3lnu, "vst3", Dt,
1817 "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn$Rm",
1818 "$Rn.addr = $wb", []> {
1819 let DecoderMethod = "DecodeVST3LN";
1822 def VST3LNd8_UPD : VST3LNWB<0b0010, {?,?,?,0}, "8"> {
1823 let Inst{7-5} = lane{2-0};
1825 def VST3LNd16_UPD : VST3LNWB<0b0110, {?,?,0,0}, "16"> {
1826 let Inst{7-6} = lane{1-0};
1828 def VST3LNd32_UPD : VST3LNWB<0b1010, {?,0,0,0}, "32"> {
1829 let Inst{7} = lane{0};
1832 def VST3LNd8Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
1833 def VST3LNd16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
1834 def VST3LNd32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
1836 def VST3LNq16_UPD : VST3LNWB<0b0110, {?,?,1,0}, "16"> {
1837 let Inst{7-6} = lane{1-0};
1839 def VST3LNq32_UPD : VST3LNWB<0b1010, {?,1,0,0}, "32"> {
1840 let Inst{7} = lane{0};
1843 def VST3LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST3lnu>;
1844 def VST3LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST3lnu>;
1846 // VST4LN : Vector Store (single 4-element structure from one lane)
// Stores lane $lane of four D registers as a 4-element structure.
1847 class VST4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1848 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1849 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4,
1850 nohash_imm:$lane), IIC_VST4ln, "vst4", Dt,
1851 "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn",
1854 let Inst{4} = Rn{4};
1855 let DecoderMethod = "DecodeVST4LN";
// Lane index encoding per element size; the 32-bit form also routes an
// alignment bit through Inst{5}.
1858 def VST4LNd8 : VST4LN<0b0011, {?,?,?,?}, "8"> {
1859 let Inst{7-5} = lane{2-0};
1861 def VST4LNd16 : VST4LN<0b0111, {?,?,0,?}, "16"> {
1862 let Inst{7-6} = lane{1-0};
1864 def VST4LNd32 : VST4LN<0b1011, {?,0,?,?}, "32"> {
1865 let Inst{7} = lane{0};
1866 let Inst{5} = Rn{5};
1869 def VST4LNd8Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1870 def VST4LNd16Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1871 def VST4LNd32Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1873 // ...with double-spaced registers:
1874 def VST4LNq16 : VST4LN<0b0111, {?,?,1,?}, "16"> {
1875 let Inst{7-6} = lane{1-0};
1877 def VST4LNq32 : VST4LN<0b1011, {?,1,?,?}, "32"> {
1878 let Inst{7} = lane{0};
1879 let Inst{5} = Rn{5};
1882 def VST4LNq16Pseudo : VSTQQQQLNPseudo<IIC_VST4ln>;
1883 def VST4LNq32Pseudo : VSTQQQQLNPseudo<IIC_VST4ln>;
1885 // ...with address register writeback:
1886 class VST4LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1887 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1888 (ins addrmode6:$Rn, am6offset:$Rm,
1889 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
1890 IIC_VST4lnu, "vst4", Dt,
1891 "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn$Rm",
1892 "$Rn.addr = $wb", []> {
1893 let Inst{4} = Rn{4};
1894 let DecoderMethod = "DecodeVST4LN";
1897 def VST4LNd8_UPD : VST4LNWB<0b0011, {?,?,?,?}, "8"> {
1898 let Inst{7-5} = lane{2-0};
1900 def VST4LNd16_UPD : VST4LNWB<0b0111, {?,?,0,?}, "16"> {
1901 let Inst{7-6} = lane{1-0};
1903 def VST4LNd32_UPD : VST4LNWB<0b1011, {?,0,?,?}, "32"> {
1904 let Inst{7} = lane{0};
1905 let Inst{5} = Rn{5};
1908 def VST4LNd8Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
1909 def VST4LNd16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
1910 def VST4LNd32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
1912 def VST4LNq16_UPD : VST4LNWB<0b0111, {?,?,1,?}, "16"> {
1913 let Inst{7-6} = lane{1-0};
1915 def VST4LNq32_UPD : VST4LNWB<0b1011, {?,1,?,?}, "32"> {
1916 let Inst{7} = lane{0};
1917 let Inst{5} = Rn{5};
1920 def VST4LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
1921 def VST4LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
1923 } // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
1926 //===----------------------------------------------------------------------===//
1927 // NEON pattern fragments
1928 //===----------------------------------------------------------------------===//
1930 // Extract D sub-registers of Q registers.
// SDNodeXForms mapping an element index to the dsub_N sub-register index of
// the D register that contains it: divide by the number of elements per
// D register (8 x i8, 4 x i16, 2 x i32, 1 x f64).
1931 def DSubReg_i8_reg : SDNodeXForm<imm, [{
1932 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1933 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/8, MVT::i32);
1935 def DSubReg_i16_reg : SDNodeXForm<imm, [{
1936 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1937 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/4, MVT::i32);
1939 def DSubReg_i32_reg : SDNodeXForm<imm, [{
1940 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1941 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/2, MVT::i32);
1943 def DSubReg_f64_reg : SDNodeXForm<imm, [{
1944 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1945 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue(), MVT::i32);
1948 // Extract S sub-registers of Q/D registers.
1949 def SSubReg_f32_reg : SDNodeXForm<imm, [{
1950 assert(ARM::ssub_3 == ARM::ssub_0+3 && "Unexpected subreg numbering");
1951 return CurDAG->getTargetConstant(ARM::ssub_0 + N->getZExtValue(), MVT::i32);
1954 // Translate lane numbers from Q registers to D subregs.
// Mask by (elements-per-D-register - 1) to get the lane within the D subreg.
1955 def SubReg_i8_lane : SDNodeXForm<imm, [{
1956 return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
1958 def SubReg_i16_lane : SDNodeXForm<imm, [{
1959 return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
1961 def SubReg_i32_lane : SDNodeXForm<imm, [{
1962 return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
1965 //===----------------------------------------------------------------------===//
1966 // Instruction Classes
1967 //===----------------------------------------------------------------------===//
1969 // Basic 2-register operations: double- and quad-register.
// N2VD/N2VQ: one-operand vector ops matched via an SDNode (OpNode); the D
// and Q variants differ only in register class and the Q-bit (0 vs 1)
// passed to N2V, and in itinerary (IIC_VUNAD vs IIC_VUNAQ).
1970 class N2VD<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1971 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
1972 string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
1973 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
1974 (ins DPR:$Vm), IIC_VUNAD, OpcodeStr, Dt,"$Vd, $Vm", "",
1975 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm))))]>;
1976 class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1977 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
1978 string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
1979 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
1980 (ins QPR:$Vm), IIC_VUNAQ, OpcodeStr, Dt,"$Vd, $Vm", "",
1981 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm))))]>;
1983 // Basic 2-register intrinsics, both double- and quad-register.
// Same shape as N2VD/N2VQ but matched via a target Intrinsic and with the
// itinerary supplied by the instantiation.
1984 class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1985 bits<2> op17_16, bits<5> op11_7, bit op4,
1986 InstrItinClass itin, string OpcodeStr, string Dt,
1987 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
1988 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
1989 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
1990 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
1991 class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1992 bits<2> op17_16, bits<5> op11_7, bit op4,
1993 InstrItinClass itin, string OpcodeStr, string Dt,
1994 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
1995 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
1996 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
1997 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
1999 // Narrow 2-register operations.
// Q-register source, D-register (narrowed) result.
2000 class N2VN<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2001 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
2002 InstrItinClass itin, string OpcodeStr, string Dt,
2003 ValueType TyD, ValueType TyQ, SDNode OpNode>
2004 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
2005 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2006 [(set DPR:$Vd, (TyD (OpNode (TyQ QPR:$Vm))))]>;
2008 // Narrow 2-register intrinsics.
2009 class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2010 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
2011 InstrItinClass itin, string OpcodeStr, string Dt,
2012 ValueType TyD, ValueType TyQ, Intrinsic IntOp>
2013 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
2014 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2015 [(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vm))))]>;
2017 // Long 2-register operations (currently only used for VMOVL).
// D-register source, Q-register (widened) result -- inverse of N2VN.
2018 class N2VL<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2019 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
2020 InstrItinClass itin, string OpcodeStr, string Dt,
2021 ValueType TyQ, ValueType TyD, SDNode OpNode>
2022 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
2023 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2024 [(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vm))))]>;
2026 // Long 2-register intrinsics.
2027 class N2VLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2028 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
2029 InstrItinClass itin, string OpcodeStr, string Dt,
2030 ValueType TyQ, ValueType TyD, Intrinsic IntOp>
2031 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
2032 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2033 [(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vm))))]>;
2035 // 2-register shuffles (VTRN/VZIP/VUZP), both double- and quad-register.
// Both registers are read and written: the constraint string ties each
// output ($Vd, $Vm) to the corresponding input ($src1, $src2).  No pattern
// is attached; these are matched elsewhere.
2036 class N2VDShuffle<bits<2> op19_18, bits<5> op11_7, string OpcodeStr, string Dt>
2037 : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 0, 0, (outs DPR:$Vd, DPR:$Vm),
2038 (ins DPR:$src1, DPR:$src2), IIC_VPERMD,
2039 OpcodeStr, Dt, "$Vd, $Vm",
2040 "$src1 = $Vd, $src2 = $Vm", []>;
2041 class N2VQShuffle<bits<2> op19_18, bits<5> op11_7,
2042 InstrItinClass itin, string OpcodeStr, string Dt>
2043 : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 1, 0, (outs QPR:$Vd, QPR:$Vm),
2044 (ins QPR:$src1, QPR:$src2), itin, OpcodeStr, Dt, "$Vd, $Vm",
2045 "$src1 = $Vd, $src2 = $Vm", []>;
2047 // Basic 3-register operations: double- and quad-register.
// N3VD/N3VQ: two-operand vector ops matched via OpNode; Commutable is
// threaded to isCommutable.  The *X variants omit the data-type suffix,
// the *SL/*SL16 variants take a scalar operand via NEONvduplane on a lane
// of a restricted register class (DPR_VFP2 / DPR_8) and are never
// commutable.
// NOTE(review): extract is elided between fused original line numbers
// (e.g. pattern "[(set ..." heads are missing); tokens kept byte-identical.
2048 class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2049 InstrItinClass itin, string OpcodeStr, string Dt,
2050 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2051 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2052 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2053 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2054 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
2055 let isCommutable = Commutable;
2057 // Same as N3VD but no data type.
2058 class N3VDX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2059 InstrItinClass itin, string OpcodeStr,
2060 ValueType ResTy, ValueType OpTy,
2061 SDNode OpNode, bit Commutable>
2062 : N3VX<op24, op23, op21_20, op11_8, 0, op4,
2063 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2064 OpcodeStr, "$Vd, $Vn, $Vm", "",
2065 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>{
2066 let isCommutable = Commutable;
// Scalar (by-lane) forms: 32-bit lanes index into DPR_VFP2, 16-bit lanes
// into DPR_8, reflecting the encodable scalar register ranges.
2069 class N3VDSL<bits<2> op21_20, bits<4> op11_8,
2070 InstrItinClass itin, string OpcodeStr, string Dt,
2071 ValueType Ty, SDNode ShOp>
2072 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2073 (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2074 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2076 (Ty (ShOp (Ty DPR:$Vn),
2077 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),imm:$lane)))))]> {
2078 let isCommutable = 0;
2080 class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
2081 string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
2082 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2083 (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2084 NVMulSLFrm, IIC_VMULi16D, OpcodeStr, Dt,"$Vd, $Vn, $Vm$lane","",
2086 (Ty (ShOp (Ty DPR:$Vn),
2087 (Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
2088 let isCommutable = 0;
// Quad-register counterparts of the classes above.
2091 class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2092 InstrItinClass itin, string OpcodeStr, string Dt,
2093 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2094 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2095 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2096 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2097 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
2098 let isCommutable = Commutable;
2100 class N3VQX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2101 InstrItinClass itin, string OpcodeStr,
2102 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2103 : N3VX<op24, op23, op21_20, op11_8, 1, op4,
2104 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2105 OpcodeStr, "$Vd, $Vn, $Vm", "",
2106 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>{
2107 let isCommutable = Commutable;
2109 class N3VQSL<bits<2> op21_20, bits<4> op11_8,
2110 InstrItinClass itin, string OpcodeStr, string Dt,
2111 ValueType ResTy, ValueType OpTy, SDNode ShOp>
2112 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2113 (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2114 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2115 [(set (ResTy QPR:$Vd),
2116 (ResTy (ShOp (ResTy QPR:$Vn),
2117 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2119 let isCommutable = 0;
2121 class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
2122 ValueType ResTy, ValueType OpTy, SDNode ShOp>
2123 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2124 (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2125 NVMulSLFrm, IIC_VMULi16Q, OpcodeStr, Dt,"$Vd, $Vn, $Vm$lane", "",
2126 [(set (ResTy QPR:$Vd),
2127 (ResTy (ShOp (ResTy QPR:$Vn),
2128 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2130 let isCommutable = 0;
2133 // Basic 3-register intrinsics, both double- and quad-register.
// Intrinsic-matched counterparts of N3VD/N3VQ.  The *Sh variants swap the
// operand order in both the ins list and the asm string ("$Vd, $Vm, $Vn")
// and are never commutable.
2134 class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2135 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2136 ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
2137 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2138 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), f, itin,
2139 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2140 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
2141 let isCommutable = Commutable;
2143 class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2144 string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
2145 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2146 (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2147 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2149 (Ty (IntOp (Ty DPR:$Vn),
2150 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
2152 let isCommutable = 0;
2154 class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2155 string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
2156 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2157 (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2158 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2160 (Ty (IntOp (Ty DPR:$Vn),
2161 (Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
2162 let isCommutable = 0;
// Swapped-operand ("Sh") form: note $Vm precedes $Vn.
2164 class N3VDIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2165 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2166 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2167 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2168 (outs DPR:$Vd), (ins DPR:$Vm, DPR:$Vn), f, itin,
2169 OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
2170 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (OpTy DPR:$Vn))))]> {
2171 let isCommutable = 0;
// Quad-register counterparts.
2174 class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2175 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2176 ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
2177 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2178 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), f, itin,
2179 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2180 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
2181 let isCommutable = Commutable;
2183 class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2184 string OpcodeStr, string Dt,
2185 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2186 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2187 (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2188 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2189 [(set (ResTy QPR:$Vd),
2190 (ResTy (IntOp (ResTy QPR:$Vn),
2191 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2193 let isCommutable = 0;
2195 class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2196 string OpcodeStr, string Dt,
2197 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2198 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2199 (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2200 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2201 [(set (ResTy QPR:$Vd),
2202 (ResTy (IntOp (ResTy QPR:$Vn),
2203 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2205 let isCommutable = 0;
2207 class N3VQIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2208 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2209 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2210 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2211 (outs QPR:$Vd), (ins QPR:$Vm, QPR:$Vn), f, itin,
2212 OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
2213 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (OpTy QPR:$Vn))))]> {
2214 let isCommutable = 0;
2217 // Multiply-Add/Sub operations: double- and quad-register.
// Accumulating ops: $src1 is tied to the destination ("$src1 = $Vd") and
// combined with the product of $Vn and $Vm via OpNode/ShOp.
2218 class N3VDMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2219 InstrItinClass itin, string OpcodeStr, string Dt,
2220 ValueType Ty, SDPatternOperator MulOp, SDPatternOperator OpNode>
2221 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2222 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2223 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2224 [(set DPR:$Vd, (Ty (OpNode DPR:$src1,
2225 (Ty (MulOp DPR:$Vn, DPR:$Vm)))))]>;
// By-lane (scalar) multiply-accumulate variants.
2227 class N3VDMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2228 string OpcodeStr, string Dt,
2229 ValueType Ty, SDPatternOperator MulOp, SDPatternOperator ShOp>
2230 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2232 (ins DPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2234 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2236 (Ty (ShOp (Ty DPR:$src1),
2238 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
2240 class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2241 string OpcodeStr, string Dt,
2242 ValueType Ty, SDNode MulOp, SDNode ShOp>
2243 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2245 (ins DPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2247 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2249 (Ty (ShOp (Ty DPR:$src1),
2251 (Ty (NEONvduplane (Ty DPR_8:$Vm),
// Quad-register counterparts; the SL forms take a ResTy/OpTy pair since
// the scalar operand lives in a D register.
2254 class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2255 InstrItinClass itin, string OpcodeStr, string Dt, ValueType Ty,
2256 SDPatternOperator MulOp, SDPatternOperator OpNode>
2257 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2258 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2259 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2260 [(set QPR:$Vd, (Ty (OpNode QPR:$src1,
2261 (Ty (MulOp QPR:$Vn, QPR:$Vm)))))]>;
2262 class N3VQMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2263 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2264 SDPatternOperator MulOp, SDPatternOperator ShOp>
2265 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2267 (ins QPR:$src1, QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2269 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2270 [(set (ResTy QPR:$Vd),
2271 (ResTy (ShOp (ResTy QPR:$src1),
2272 (ResTy (MulOp QPR:$Vn,
2273 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2275 class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2276 string OpcodeStr, string Dt,
2277 ValueType ResTy, ValueType OpTy,
2278 SDNode MulOp, SDNode ShOp>
2279 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2281 (ins QPR:$src1, QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2283 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2284 [(set (ResTy QPR:$Vd),
2285 (ResTy (ShOp (ResTy QPR:$src1),
2286 (ResTy (MulOp QPR:$Vn,
2287 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2290 // Neon Intrinsic-Op instructions (VABA): double- and quad-register.
// Accumulate the result of an intrinsic into $src1 via OpNode.
2291 class N3VDIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2292 InstrItinClass itin, string OpcodeStr, string Dt,
2293 ValueType Ty, Intrinsic IntOp, SDNode OpNode>
2294 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2295 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2296 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2297 [(set DPR:$Vd, (Ty (OpNode DPR:$src1,
2298 (Ty (IntOp (Ty DPR:$Vn), (Ty DPR:$Vm))))))]>;
2299 class N3VQIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2300 InstrItinClass itin, string OpcodeStr, string Dt,
2301 ValueType Ty, Intrinsic IntOp, SDNode OpNode>
2302 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2303 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2304 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2305 [(set QPR:$Vd, (Ty (OpNode QPR:$src1,
2306 (Ty (IntOp (Ty QPR:$Vn), (Ty QPR:$Vm))))))]>;
2308 // Neon 3-argument intrinsics, both double- and quad-register.
2309 // The destination register is also used as the first source operand register.
// The intrinsic itself takes the tied accumulator ($src1) as its first arg.
2310 class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2311 InstrItinClass itin, string OpcodeStr, string Dt,
2312 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2313 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2314 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2315 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2316 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$src1),
2317 (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
2318 class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2319 InstrItinClass itin, string OpcodeStr, string Dt,
2320 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2321 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2322 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2323 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2324 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$src1),
2325 (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>;
2327 // Long Multiply-Add/Sub operations.
// Widening multiply-accumulate: D-register operands, Q-register tied
// accumulator/result (TyD operands, TyQ result).
2328 class N3VLMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2329 InstrItinClass itin, string OpcodeStr, string Dt,
2330 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2331 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2332 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2333 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2334 [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
2335 (TyQ (MulOp (TyD DPR:$Vn),
2336 (TyD DPR:$Vm)))))]>;
// By-lane widening multiply-accumulate variants.
2337 class N3VLMulOpSL<bit op24, bits<2> op21_20, bits<4> op11_8,
2338 InstrItinClass itin, string OpcodeStr, string Dt,
2339 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2340 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
2341 (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2343 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2345 (OpNode (TyQ QPR:$src1),
2346 (TyQ (MulOp (TyD DPR:$Vn),
2347 (TyD (NEONvduplane (TyD DPR_VFP2:$Vm),
2349 class N3VLMulOpSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2350 InstrItinClass itin, string OpcodeStr, string Dt,
2351 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2352 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
2353 (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2355 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2357 (OpNode (TyQ QPR:$src1),
2358 (TyQ (MulOp (TyD DPR:$Vn),
2359 (TyD (NEONvduplane (TyD DPR_8:$Vm),
2362 // Long Intrinsic-Op vector operations with explicit extend (VABAL).
// Intrinsic result (TyD) is extended via ExtOp to TyQ, then accumulated.
2363 class N3VLIntExtOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2364 InstrItinClass itin, string OpcodeStr, string Dt,
2365 ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
2367 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2368 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2369 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2370 [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
2371 (TyQ (ExtOp (TyD (IntOp (TyD DPR:$Vn),
2372 (TyD DPR:$Vm)))))))]>;
2374 // Neon Long 3-argument intrinsic. The destination register is
2375 // a quad-register and is also used as the first source operand register.
2376 class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2377 InstrItinClass itin, string OpcodeStr, string Dt,
2378 ValueType TyQ, ValueType TyD, Intrinsic IntOp>
2379 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2380 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2381 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2383 (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$Vn), (TyD DPR:$Vm))))]>;
// By-lane variants of the long 3-argument intrinsic.
2384 class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2385 string OpcodeStr, string Dt,
2386 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2387 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2389 (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2391 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2392 [(set (ResTy QPR:$Vd),
2393 (ResTy (IntOp (ResTy QPR:$src1),
2395 (OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2397 class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2398 InstrItinClass itin, string OpcodeStr, string Dt,
2399 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2400 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2402 (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2404 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2405 [(set (ResTy QPR:$Vd),
2406 (ResTy (IntOp (ResTy QPR:$src1),
2408 (OpTy (NEONvduplane (OpTy DPR_8:$Vm),
2411 // Narrowing 3-register intrinsics.
// Two Q-register sources, one D-register (narrowed) result.
2412 class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2413 string OpcodeStr, string Dt, ValueType TyD, ValueType TyQ,
2414 Intrinsic IntOp, bit Commutable>
2415 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2416 (outs DPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINi4D,
2417 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2418 [(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vn), (TyQ QPR:$Vm))))]> {
2419 let isCommutable = Commutable;
2422 // Long 3-register operations.
// Two D-register sources, one Q-register (widened) result.
2423 class N3VL<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2424 InstrItinClass itin, string OpcodeStr, string Dt,
2425 ValueType TyQ, ValueType TyD, SDNode OpNode, bit Commutable>
2426 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2427 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2428 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2429 [(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
2430 let isCommutable = Commutable;
// By-lane long operations (32- and 16-bit scalar forms).
2432 class N3VLSL<bit op24, bits<2> op21_20, bits<4> op11_8,
2433 InstrItinClass itin, string OpcodeStr, string Dt,
2434 ValueType TyQ, ValueType TyD, SDNode OpNode>
2435 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2436 (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2437 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2439 (TyQ (OpNode (TyD DPR:$Vn),
2440 (TyD (NEONvduplane (TyD DPR_VFP2:$Vm),imm:$lane)))))]>;
2441 class N3VLSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2442 InstrItinClass itin, string OpcodeStr, string Dt,
2443 ValueType TyQ, ValueType TyD, SDNode OpNode>
2444 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2445 (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2446 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2448 (TyQ (OpNode (TyD DPR:$Vn),
2449 (TyD (NEONvduplane (TyD DPR_8:$Vm), imm:$lane)))))]>;
2451 // Long 3-register operations with explicitly extended operands.
// N3VLExt: each D-register operand is widened first (ExtOp, e.g. sext/zext),
// then OpNode is applied on the Q-sized values.
2452 class N3VLExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2453 InstrItinClass itin, string OpcodeStr, string Dt,
2454 ValueType TyQ, ValueType TyD, SDNode OpNode, SDNode ExtOp,
2456 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2457 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2458 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2459 [(set QPR:$Vd, (OpNode (TyQ (ExtOp (TyD DPR:$Vn))),
2460 (TyQ (ExtOp (TyD DPR:$Vm)))))]> {
2461 let isCommutable = Commutable;
2464 // Long 3-register intrinsics with explicit extend (VABDL).
// N3VLIntExt: opposite ordering from N3VLExt -- the intrinsic runs on the
// narrow D-sized operands and only its result is widened by ExtOp.
2465 class N3VLIntExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2466 InstrItinClass itin, string OpcodeStr, string Dt,
2467 ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
2469 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2470 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2471 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2472 [(set QPR:$Vd, (TyQ (ExtOp (TyD (IntOp (TyD DPR:$Vn),
2473 (TyD DPR:$Vm))))))]> {
2474 let isCommutable = Commutable;
2477 // Long 3-register intrinsics.
// N3VLInt: intrinsic form of N3VL -- D-register sources, Q-register result,
// selection via IntOp instead of a generic SDNode.
2478 class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2479 InstrItinClass itin, string OpcodeStr, string Dt,
2480 ValueType TyQ, ValueType TyD, Intrinsic IntOp, bit Commutable>
2481 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2482 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2483 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2484 [(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
2485 let isCommutable = Commutable;
// N3VLIntSL: long intrinsic with a 32-bit scalar-lane second operand
// (DPR_VFP2 lane duplicated via NEONvduplane).
2487 class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2488 string OpcodeStr, string Dt,
2489 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2490 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2491 (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2492 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2493 [(set (ResTy QPR:$Vd),
2494 (ResTy (IntOp (OpTy DPR:$Vn),
2495 (OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
// N3VLIntSL16: 16-bit-lane variant (DPR_8 scalar, VectorIndex16).
2497 class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2498 InstrItinClass itin, string OpcodeStr, string Dt,
2499 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2500 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2501 (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2502 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2503 [(set (ResTy QPR:$Vd),
2504 (ResTy (IntOp (OpTy DPR:$Vn),
2505 (OpTy (NEONvduplane (OpTy DPR_8:$Vm),
2508 // Wide 3-register operations.
// N3VW: "wide" op -- first operand is already Q-sized; only the second
// (D-register) operand is widened by ExtOp before OpNode is applied.
2509 class N3VW<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2510 string OpcodeStr, string Dt, ValueType TyQ, ValueType TyD,
2511 SDNode OpNode, SDNode ExtOp, bit Commutable>
2512 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2513 (outs QPR:$Vd), (ins QPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VSUBiD,
2514 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2515 [(set QPR:$Vd, (OpNode (TyQ QPR:$Vn),
2516 (TyQ (ExtOp (TyD DPR:$Vm)))))]> {
2517 let isCommutable = Commutable;
2520 // Pairwise long 2-register intrinsics, both double- and quad-register.
// N2VDPLInt: one D-register source, one D-register result; ResTy/OpTy may
// differ (pairwise-long ops change element width).
2521 class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2522 bits<2> op17_16, bits<5> op11_7, bit op4,
2523 string OpcodeStr, string Dt,
2524 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2525 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
2526 (ins DPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
2527 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
// N2VQPLInt: quad-register counterpart of N2VDPLInt (op6 bit = 1).
2528 class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2529 bits<2> op17_16, bits<5> op11_7, bit op4,
2530 string OpcodeStr, string Dt,
2531 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2532 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
2533 (ins QPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
2534 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
2536 // Pairwise long 2-register accumulate intrinsics,
2537 // both double- and quad-register.
2538 // The destination register is also used as the first source operand register.
// N2VDPLInt2: accumulating form -- "$src1 = $Vd" ties the accumulator input
// to the destination register.
2539 class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2540 bits<2> op17_16, bits<5> op11_7, bit op4,
2541 string OpcodeStr, string Dt,
2542 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2543 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
2544 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vm), IIC_VPALiD,
2545 OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
2546 [(set DPR:$Vd, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$Vm))))]>;
// N2VQPLInt2: quad-register accumulating counterpart.
2547 class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2548 bits<2> op17_16, bits<5> op11_7, bit op4,
2549 string OpcodeStr, string Dt,
2550 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2551 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
2552 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vm), IIC_VPALiQ,
2553 OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
2554 [(set QPR:$Vd, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$Vm))))]>;
2556 // Shift by immediate,
2557 // both double- and quad-register.
// N2VDSh: D-register shift by immediate $SIMM, matched as (OpNode vec, imm).
2558 class N2VDSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2559 Format f, InstrItinClass itin, Operand ImmTy,
2560 string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
2561 : N2VImm<op24, op23, op11_8, op7, 0, op4,
2562 (outs DPR:$Vd), (ins DPR:$Vm, ImmTy:$SIMM), f, itin,
2563 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2564 [(set DPR:$Vd, (Ty (OpNode (Ty DPR:$Vm), (i32 imm:$SIMM))))]>;
// N2VQSh: quad-register counterpart of N2VDSh.
2565 class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2566 Format f, InstrItinClass itin, Operand ImmTy,
2567 string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
2568 : N2VImm<op24, op23, op11_8, op7, 1, op4,
2569 (outs QPR:$Vd), (ins QPR:$Vm, ImmTy:$SIMM), f, itin,
2570 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2571 [(set QPR:$Vd, (Ty (OpNode (Ty QPR:$Vm), (i32 imm:$SIMM))))]>;
2573 // Long shift by immediate.
// N2VLSh: D-register source, Q-register (widened) result.
2574 class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
2575 string OpcodeStr, string Dt,
2576 ValueType ResTy, ValueType OpTy, SDNode OpNode>
2577 : N2VImm<op24, op23, op11_8, op7, op6, op4,
2578 (outs QPR:$Vd), (ins DPR:$Vm, i32imm:$SIMM), N2RegVShLFrm,
2579 IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2580 [(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm),
2581 (i32 imm:$SIMM))))]>;
2583 // Narrow shift by immediate.
// N2VNSh: Q-register source, D-register (narrowed) result.
2584 class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
2585 InstrItinClass itin, string OpcodeStr, string Dt,
2586 ValueType ResTy, ValueType OpTy, Operand ImmTy, SDNode OpNode>
2587 : N2VImm<op24, op23, op11_8, op7, op6, op4,
2588 (outs DPR:$Vd), (ins QPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, itin,
2589 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2590 [(set DPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm),
2591 (i32 imm:$SIMM))))]>;
2593 // Shift right by immediate and accumulate,
2594 // both double- and quad-register.
// N2VDShAdd: matches (add acc, (ShOp vec, imm)); accumulator $src1 is tied
// to the destination ("$src1 = $Vd"), e.g. VSRA-style patterns.
2595 class N2VDShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2596 Operand ImmTy, string OpcodeStr, string Dt,
2597 ValueType Ty, SDNode ShOp>
2598 : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
2599 (ins DPR:$src1, DPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, IIC_VPALiD,
2600 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2601 [(set DPR:$Vd, (Ty (add DPR:$src1,
2602 (Ty (ShOp DPR:$Vm, (i32 imm:$SIMM))))))]>;
// N2VQShAdd: quad-register counterpart of N2VDShAdd.
2603 class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2604 Operand ImmTy, string OpcodeStr, string Dt,
2605 ValueType Ty, SDNode ShOp>
2606 : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
2607 (ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, IIC_VPALiD,
2608 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2609 [(set QPR:$Vd, (Ty (add QPR:$src1,
2610 (Ty (ShOp QPR:$Vm, (i32 imm:$SIMM))))))]>;
2612 // Shift by immediate and insert,
2613 // both double- and quad-register.
// N2VDShIns: three-operand ShOp (dest, src, imm) with tied $src1 -- shift
// results are inserted into the existing destination bits.
2614 class N2VDShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2615 Operand ImmTy, Format f, string OpcodeStr, string Dt,
2616 ValueType Ty,SDNode ShOp>
2617 : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
2618 (ins DPR:$src1, DPR:$Vm, ImmTy:$SIMM), f, IIC_VSHLiD,
2619 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2620 [(set DPR:$Vd, (Ty (ShOp DPR:$src1, DPR:$Vm, (i32 imm:$SIMM))))]>;
// N2VQShIns: quad-register counterpart of N2VDShIns.
2621 class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2622 Operand ImmTy, Format f, string OpcodeStr, string Dt,
2623 ValueType Ty,SDNode ShOp>
2624 : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
2625 (ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), f, IIC_VSHLiQ,
2626 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2627 [(set QPR:$Vd, (Ty (ShOp QPR:$src1, QPR:$Vm, (i32 imm:$SIMM))))]>;
2629 // Convert, with fractional bits immediate,
2630 // both double- and quad-register.
// N2VCvtD: fixed-point <-> float conversion; $SIMM (neon_vcvt_imm32) is the
// number of fractional bits.
2631 class N2VCvtD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2632 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2634 : N2VImm<op24, op23, op11_8, op7, 0, op4,
2635 (outs DPR:$Vd), (ins DPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
2636 IIC_VUNAD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2637 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (i32 imm:$SIMM))))]>;
// N2VCvtQ: quad-register counterpart of N2VCvtD.
2638 class N2VCvtQ<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2639 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2641 : N2VImm<op24, op23, op11_8, op7, 1, op4,
2642 (outs QPR:$Vd), (ins QPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
2643 IIC_VUNAQ, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2644 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (i32 imm:$SIMM))))]>;
2646 //===----------------------------------------------------------------------===//
2648 //===----------------------------------------------------------------------===//
2650 // Abbreviations used in multiclass suffixes:
2651 // Q = quarter int (8 bit) elements
2652 // H = half int (16 bit) elements
2653 // S = single int (32 bit) elements
2654 // D = double int (64 bit) elements
2656 // Neon 2-register vector operations and intrinsics.
2658 // Neon 2-register comparisons.
2659 // source operand element sizes of 8, 16 and 32 bits:
// N2V_QHS_cmp: one-operand comparisons over all element sizes.  Note the
// f32 variants produce integer mask vectors (v2i32/v4i32 results from
// v2f32/v4f32 sources) and force the F bit (Inst{10}) to 1.
2660 multiclass N2V_QHS_cmp<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2661 bits<5> op11_7, bit op4, string opc, string Dt,
2662 string asm, SDNode OpNode> {
2663 // 64-bit vector types.
2664 def v8i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 0, op4,
2665 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2666 opc, !strconcat(Dt, "8"), asm, "",
2667 [(set DPR:$Vd, (v8i8 (OpNode (v8i8 DPR:$Vm))))]>;
2668 def v4i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 0, op4,
2669 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2670 opc, !strconcat(Dt, "16"), asm, "",
2671 [(set DPR:$Vd, (v4i16 (OpNode (v4i16 DPR:$Vm))))]>;
2672 def v2i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
2673 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2674 opc, !strconcat(Dt, "32"), asm, "",
2675 [(set DPR:$Vd, (v2i32 (OpNode (v2i32 DPR:$Vm))))]>;
2676 def v2f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
2677 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2678 opc, "f32", asm, "",
2679 [(set DPR:$Vd, (v2i32 (OpNode (v2f32 DPR:$Vm))))]> {
2680 let Inst{10} = 1; // overwrite F = 1
2683 // 128-bit vector types.
2684 def v16i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 1, op4,
2685 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2686 opc, !strconcat(Dt, "8"), asm, "",
2687 [(set QPR:$Vd, (v16i8 (OpNode (v16i8 QPR:$Vm))))]>;
2688 def v8i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 1, op4,
2689 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2690 opc, !strconcat(Dt, "16"), asm, "",
2691 [(set QPR:$Vd, (v8i16 (OpNode (v8i16 QPR:$Vm))))]>;
2692 def v4i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
2693 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2694 opc, !strconcat(Dt, "32"), asm, "",
2695 [(set QPR:$Vd, (v4i32 (OpNode (v4i32 QPR:$Vm))))]>;
2696 def v4f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
2697 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2698 opc, "f32", asm, "",
2699 [(set QPR:$Vd, (v4i32 (OpNode (v4f32 QPR:$Vm))))]> {
2700 let Inst{10} = 1; // overwrite F = 1
2705 // Neon 2-register vector intrinsics,
2706 // element sizes of 8, 16 and 32 bits:
// N2VInt_QHS: instantiates D (8/16/32) and Q (8/16/32) intrinsic variants;
// the size suffix is appended to Dt via !strconcat.
2707 multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2708 bits<5> op11_7, bit op4,
2709 InstrItinClass itinD, InstrItinClass itinQ,
2710 string OpcodeStr, string Dt, Intrinsic IntOp> {
2711 // 64-bit vector types.
2712 def v8i8 : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
2713 itinD, OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
2714 def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
2715 itinD, OpcodeStr, !strconcat(Dt, "16"),v4i16,v4i16,IntOp>;
2716 def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
2717 itinD, OpcodeStr, !strconcat(Dt, "32"),v2i32,v2i32,IntOp>;
2719 // 128-bit vector types.
2720 def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
2721 itinQ, OpcodeStr, !strconcat(Dt, "8"), v16i8,v16i8,IntOp>;
2722 def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
2723 itinQ, OpcodeStr, !strconcat(Dt, "16"),v8i16,v8i16,IntOp>;
2724 def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
2725 itinQ, OpcodeStr, !strconcat(Dt, "32"),v4i32,v4i32,IntOp>;
2729 // Neon Narrowing 2-register vector operations,
2730 // source operand element sizes of 16, 32 and 64 bits:
// N2VN_HSD: each def narrows one step (16->8, 32->16, 64->32); the Dt
// suffix names the SOURCE element size.
2731 multiclass N2VN_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2732 bits<5> op11_7, bit op6, bit op4,
2733 InstrItinClass itin, string OpcodeStr, string Dt,
2735 def v8i8 : N2VN<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
2736 itin, OpcodeStr, !strconcat(Dt, "16"),
2737 v8i8, v8i16, OpNode>;
2738 def v4i16 : N2VN<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
2739 itin, OpcodeStr, !strconcat(Dt, "32"),
2740 v4i16, v4i32, OpNode>;
2741 def v2i32 : N2VN<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
2742 itin, OpcodeStr, !strconcat(Dt, "64"),
2743 v2i32, v2i64, OpNode>;
2746 // Neon Narrowing 2-register vector intrinsics,
2747 // source operand element sizes of 16, 32 and 64 bits:
// N2VNInt_HSD: intrinsic counterpart of N2VN_HSD.
2748 multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2749 bits<5> op11_7, bit op6, bit op4,
2750 InstrItinClass itin, string OpcodeStr, string Dt,
2752 def v8i8 : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
2753 itin, OpcodeStr, !strconcat(Dt, "16"),
2754 v8i8, v8i16, IntOp>;
2755 def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
2756 itin, OpcodeStr, !strconcat(Dt, "32"),
2757 v4i16, v4i32, IntOp>;
2758 def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
2759 itin, OpcodeStr, !strconcat(Dt, "64"),
2760 v2i32, v2i64, IntOp>;
2764 // Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
2765 // source operand element sizes of 16, 32 and 64 bits:
// N2VL_QHS: each def widens one step (8->16, 16->32, 32->64); the Dt suffix
// names the SOURCE element size.
2766 multiclass N2VL_QHS<bits<2> op24_23, bits<5> op11_7, bit op6, bit op4,
2767 string OpcodeStr, string Dt, SDNode OpNode> {
2768 def v8i16 : N2VL<op24_23, 0b00, 0b10, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2769 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode>;
2770 def v4i32 : N2VL<op24_23, 0b01, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2771 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode>;
2772 def v2i64 : N2VL<op24_23, 0b10, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2773 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode>;
2777 // Neon 3-register vector operations.
2779 // First with only element sizes of 8, 16 and 32 bits:
// N3V_QHS: D and Q same-size binary ops; bits<2> size field encodes the
// element width (0b00=8, 0b01=16, 0b10=32).
2780 multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
2781 InstrItinClass itinD16, InstrItinClass itinD32,
2782 InstrItinClass itinQ16, InstrItinClass itinQ32,
2783 string OpcodeStr, string Dt,
2784 SDNode OpNode, bit Commutable = 0> {
2785 // 64-bit vector types.
2786 def v8i8 : N3VD<op24, op23, 0b00, op11_8, op4, itinD16,
2787 OpcodeStr, !strconcat(Dt, "8"),
2788 v8i8, v8i8, OpNode, Commutable>;
2789 def v4i16 : N3VD<op24, op23, 0b01, op11_8, op4, itinD16,
2790 OpcodeStr, !strconcat(Dt, "16"),
2791 v4i16, v4i16, OpNode, Commutable>;
2792 def v2i32 : N3VD<op24, op23, 0b10, op11_8, op4, itinD32,
2793 OpcodeStr, !strconcat(Dt, "32"),
2794 v2i32, v2i32, OpNode, Commutable>;
2796 // 128-bit vector types.
2797 def v16i8 : N3VQ<op24, op23, 0b00, op11_8, op4, itinQ16,
2798 OpcodeStr, !strconcat(Dt, "8"),
2799 v16i8, v16i8, OpNode, Commutable>;
2800 def v8i16 : N3VQ<op24, op23, 0b01, op11_8, op4, itinQ16,
2801 OpcodeStr, !strconcat(Dt, "16"),
2802 v8i16, v8i16, OpNode, Commutable>;
2803 def v4i32 : N3VQ<op24, op23, 0b10, op11_8, op4, itinQ32,
2804 OpcodeStr, !strconcat(Dt, "32"),
2805 v4i32, v4i32, OpNode, Commutable>;
// N3VSL_HS: scalar-lane ("by element") variants; only 16- and 32-bit
// element sizes exist for lane ops.
2808 multiclass N3VSL_HS<bits<4> op11_8, string OpcodeStr, string Dt, SDNode ShOp> {
2809 def v4i16 : N3VDSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
2811 def v2i32 : N3VDSL<0b10, op11_8, IIC_VMULi32D, OpcodeStr, !strconcat(Dt,"32"),
2813 def v8i16 : N3VQSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
2814 v8i16, v4i16, ShOp>;
2815 def v4i32 : N3VQSL<0b10, op11_8, IIC_VMULi32Q, OpcodeStr, !strconcat(Dt,"32"),
2816 v4i32, v2i32, ShOp>;
2819 // ....then also with element size 64 bits:
// N3V_QHSD: inherits the 8/16/32-bit defs from N3V_QHS and adds the 64-bit
// D and Q variants (size field 0b11).
2820 multiclass N3V_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
2821 InstrItinClass itinD, InstrItinClass itinQ,
2822 string OpcodeStr, string Dt,
2823 SDNode OpNode, bit Commutable = 0>
2824 : N3V_QHS<op24, op23, op11_8, op4, itinD, itinD, itinQ, itinQ,
2825 OpcodeStr, Dt, OpNode, Commutable> {
2826 def v1i64 : N3VD<op24, op23, 0b11, op11_8, op4, itinD,
2827 OpcodeStr, !strconcat(Dt, "64"),
2828 v1i64, v1i64, OpNode, Commutable>;
2829 def v2i64 : N3VQ<op24, op23, 0b11, op11_8, op4, itinQ,
2830 OpcodeStr, !strconcat(Dt, "64"),
2831 v2i64, v2i64, OpNode, Commutable>;
2835 // Neon 3-register vector intrinsics.
2837 // First with only element sizes of 16 and 32 bits:
// N3VInt_HS: D and Q same-size intrinsic ops for 16/32-bit elements.
2838 multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2839 InstrItinClass itinD16, InstrItinClass itinD32,
2840 InstrItinClass itinQ16, InstrItinClass itinQ32,
2841 string OpcodeStr, string Dt,
2842 Intrinsic IntOp, bit Commutable = 0> {
2843 // 64-bit vector types.
2844 def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, f, itinD16,
2845 OpcodeStr, !strconcat(Dt, "16"),
2846 v4i16, v4i16, IntOp, Commutable>;
2847 def v2i32 : N3VDInt<op24, op23, 0b10, op11_8, op4, f, itinD32,
2848 OpcodeStr, !strconcat(Dt, "32"),
2849 v2i32, v2i32, IntOp, Commutable>;
2851 // 128-bit vector types.
2852 def v8i16 : N3VQInt<op24, op23, 0b01, op11_8, op4, f, itinQ16,
2853 OpcodeStr, !strconcat(Dt, "16"),
2854 v8i16, v8i16, IntOp, Commutable>;
2855 def v4i32 : N3VQInt<op24, op23, 0b10, op11_8, op4, f, itinQ32,
2856 OpcodeStr, !strconcat(Dt, "32"),
2857 v4i32, v4i32, IntOp, Commutable>;
// N3VInt_HSSh: same shape as N3VInt_HS but built on the *Sh (shift)
// instruction classes and without a Commutable parameter.
2859 multiclass N3VInt_HSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2860 InstrItinClass itinD16, InstrItinClass itinD32,
2861 InstrItinClass itinQ16, InstrItinClass itinQ32,
2862 string OpcodeStr, string Dt,
2864 // 64-bit vector types.
2865 def v4i16 : N3VDIntSh<op24, op23, 0b01, op11_8, op4, f, itinD16,
2866 OpcodeStr, !strconcat(Dt, "16"),
2867 v4i16, v4i16, IntOp>;
2868 def v2i32 : N3VDIntSh<op24, op23, 0b10, op11_8, op4, f, itinD32,
2869 OpcodeStr, !strconcat(Dt, "32"),
2870 v2i32, v2i32, IntOp>;
2872 // 128-bit vector types.
2873 def v8i16 : N3VQIntSh<op24, op23, 0b01, op11_8, op4, f, itinQ16,
2874 OpcodeStr, !strconcat(Dt, "16"),
2875 v8i16, v8i16, IntOp>;
2876 def v4i32 : N3VQIntSh<op24, op23, 0b10, op11_8, op4, f, itinQ32,
2877 OpcodeStr, !strconcat(Dt, "32"),
2878 v4i32, v4i32, IntOp>;
// N3VIntSL_HS: scalar-lane intrinsic variants for 16/32-bit elements,
// D and Q register forms.
2881 multiclass N3VIntSL_HS<bits<4> op11_8,
2882 InstrItinClass itinD16, InstrItinClass itinD32,
2883 InstrItinClass itinQ16, InstrItinClass itinQ32,
2884 string OpcodeStr, string Dt, Intrinsic IntOp> {
2885 def v4i16 : N3VDIntSL16<0b01, op11_8, itinD16,
2886 OpcodeStr, !strconcat(Dt, "16"), v4i16, IntOp>;
2887 def v2i32 : N3VDIntSL<0b10, op11_8, itinD32,
2888 OpcodeStr, !strconcat(Dt, "32"), v2i32, IntOp>;
2889 def v8i16 : N3VQIntSL16<0b01, op11_8, itinQ16,
2890 OpcodeStr, !strconcat(Dt, "16"), v8i16, v4i16, IntOp>;
2891 def v4i32 : N3VQIntSL<0b10, op11_8, itinQ32,
2892 OpcodeStr, !strconcat(Dt, "32"), v4i32, v2i32, IntOp>;
2895 // ....then also with element size of 8 bits:
// N3VInt_QHS: extends N3VInt_HS with the 8-bit D and Q variants.
2896 multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2897 InstrItinClass itinD16, InstrItinClass itinD32,
2898 InstrItinClass itinQ16, InstrItinClass itinQ32,
2899 string OpcodeStr, string Dt,
2900 Intrinsic IntOp, bit Commutable = 0>
2901 : N3VInt_HS<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2902 OpcodeStr, Dt, IntOp, Commutable> {
2903 def v8i8 : N3VDInt<op24, op23, 0b00, op11_8, op4, f, itinD16,
2904 OpcodeStr, !strconcat(Dt, "8"),
2905 v8i8, v8i8, IntOp, Commutable>;
2906 def v16i8 : N3VQInt<op24, op23, 0b00, op11_8, op4, f, itinQ16,
2907 OpcodeStr, !strconcat(Dt, "8"),
2908 v16i8, v16i8, IntOp, Commutable>;
// N3VInt_QHSSh: shift-class counterpart, extending N3VInt_HSSh with 8-bit.
2910 multiclass N3VInt_QHSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2911 InstrItinClass itinD16, InstrItinClass itinD32,
2912 InstrItinClass itinQ16, InstrItinClass itinQ32,
2913 string OpcodeStr, string Dt,
2915 : N3VInt_HSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2916 OpcodeStr, Dt, IntOp> {
2917 def v8i8 : N3VDIntSh<op24, op23, 0b00, op11_8, op4, f, itinD16,
2918 OpcodeStr, !strconcat(Dt, "8"),
2920 def v16i8 : N3VQIntSh<op24, op23, 0b00, op11_8, op4, f, itinQ16,
2921 OpcodeStr, !strconcat(Dt, "8"),
2922 v16i8, v16i8, IntOp>;
2926 // ....then also with element size of 64 bits:
// N3VInt_QHSD: extends N3VInt_QHS with the 64-bit D and Q variants
// (size field 0b11).
2927 multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2928 InstrItinClass itinD16, InstrItinClass itinD32,
2929 InstrItinClass itinQ16, InstrItinClass itinQ32,
2930 string OpcodeStr, string Dt,
2931 Intrinsic IntOp, bit Commutable = 0>
2932 : N3VInt_QHS<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2933 OpcodeStr, Dt, IntOp, Commutable> {
2934 def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, f, itinD32,
2935 OpcodeStr, !strconcat(Dt, "64"),
2936 v1i64, v1i64, IntOp, Commutable>;
2937 def v2i64 : N3VQInt<op24, op23, 0b11, op11_8, op4, f, itinQ32,
2938 OpcodeStr, !strconcat(Dt, "64"),
2939 v2i64, v2i64, IntOp, Commutable>;
// N3VInt_QHSDSh: shift-class counterpart, extending N3VInt_QHSSh with 64-bit.
2941 multiclass N3VInt_QHSDSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2942 InstrItinClass itinD16, InstrItinClass itinD32,
2943 InstrItinClass itinQ16, InstrItinClass itinQ32,
2944 string OpcodeStr, string Dt,
2946 : N3VInt_QHSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2947 OpcodeStr, Dt, IntOp> {
2948 def v1i64 : N3VDIntSh<op24, op23, 0b11, op11_8, op4, f, itinD32,
2949 OpcodeStr, !strconcat(Dt, "64"),
2950 v1i64, v1i64, IntOp>;
2951 def v2i64 : N3VQIntSh<op24, op23, 0b11, op11_8, op4, f, itinQ32,
2952 OpcodeStr, !strconcat(Dt, "64"),
2953 v2i64, v2i64, IntOp>;
2956 // Neon Narrowing 3-register vector intrinsics,
2957 // source operand element sizes of 16, 32 and 64 bits:
// N3VNInt_HSD: two Q-sized sources narrow to a D-sized result; Dt suffix
// names the SOURCE element size.
2958 multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
2959 string OpcodeStr, string Dt,
2960 Intrinsic IntOp, bit Commutable = 0> {
2961 def v8i8 : N3VNInt<op24, op23, 0b00, op11_8, op4,
2962 OpcodeStr, !strconcat(Dt, "16"),
2963 v8i8, v8i16, IntOp, Commutable>;
2964 def v4i16 : N3VNInt<op24, op23, 0b01, op11_8, op4,
2965 OpcodeStr, !strconcat(Dt, "32"),
2966 v4i16, v4i32, IntOp, Commutable>;
2967 def v2i32 : N3VNInt<op24, op23, 0b10, op11_8, op4,
2968 OpcodeStr, !strconcat(Dt, "64"),
2969 v2i32, v2i64, IntOp, Commutable>;
2973 // Neon Long 3-register vector operations.
// N3VL_QHS: long ops (D x D -> Q) for 8/16/32-bit source elements.
2975 multiclass N3VL_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
2976 InstrItinClass itin16, InstrItinClass itin32,
2977 string OpcodeStr, string Dt,
2978 SDNode OpNode, bit Commutable = 0> {
2979 def v8i16 : N3VL<op24, op23, 0b00, op11_8, op4, itin16,
2980 OpcodeStr, !strconcat(Dt, "8"),
2981 v8i16, v8i8, OpNode, Commutable>;
2982 def v4i32 : N3VL<op24, op23, 0b01, op11_8, op4, itin16,
2983 OpcodeStr, !strconcat(Dt, "16"),
2984 v4i32, v4i16, OpNode, Commutable>;
2985 def v2i64 : N3VL<op24, op23, 0b10, op11_8, op4, itin32,
2986 OpcodeStr, !strconcat(Dt, "32"),
2987 v2i64, v2i32, OpNode, Commutable>;
// N3VLSL_HS: scalar-lane long ops; only 16- and 32-bit source elements.
2990 multiclass N3VLSL_HS<bit op24, bits<4> op11_8,
2991 InstrItinClass itin, string OpcodeStr, string Dt,
2993 def v4i16 : N3VLSL16<op24, 0b01, op11_8, itin, OpcodeStr,
2994 !strconcat(Dt, "16"), v4i32, v4i16, OpNode>;
2995 def v2i32 : N3VLSL<op24, 0b10, op11_8, itin, OpcodeStr,
2996 !strconcat(Dt, "32"), v2i64, v2i32, OpNode>;
// N3VLExt_QHS: long ops with explicitly extended operands (see N3VLExt).
2999 multiclass N3VLExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3000 InstrItinClass itin16, InstrItinClass itin32,
3001 string OpcodeStr, string Dt,
3002 SDNode OpNode, SDNode ExtOp, bit Commutable = 0> {
3003 def v8i16 : N3VLExt<op24, op23, 0b00, op11_8, op4, itin16,
3004 OpcodeStr, !strconcat(Dt, "8"),
3005 v8i16, v8i8, OpNode, ExtOp, Commutable>;
3006 def v4i32 : N3VLExt<op24, op23, 0b01, op11_8, op4, itin16,
3007 OpcodeStr, !strconcat(Dt, "16"),
3008 v4i32, v4i16, OpNode, ExtOp, Commutable>;
3009 def v2i64 : N3VLExt<op24, op23, 0b10, op11_8, op4, itin32,
3010 OpcodeStr, !strconcat(Dt, "32"),
3011 v2i64, v2i32, OpNode, ExtOp, Commutable>;
3014 // Neon Long 3-register vector intrinsics.
3016 // First with only element sizes of 16 and 32 bits:
// N3VLInt_HS: long intrinsics (D x D -> Q) for 16/32-bit source elements.
3017 multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
3018 InstrItinClass itin16, InstrItinClass itin32,
3019 string OpcodeStr, string Dt,
3020 Intrinsic IntOp, bit Commutable = 0> {
3021 def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, itin16,
3022 OpcodeStr, !strconcat(Dt, "16"),
3023 v4i32, v4i16, IntOp, Commutable>;
3024 def v2i64 : N3VLInt<op24, op23, 0b10, op11_8, op4, itin32,
3025 OpcodeStr, !strconcat(Dt, "32"),
3026 v2i64, v2i32, IntOp, Commutable>;
// N3VLIntSL_HS: scalar-lane long intrinsics for 16/32-bit source elements.
3029 multiclass N3VLIntSL_HS<bit op24, bits<4> op11_8,
3030 InstrItinClass itin, string OpcodeStr, string Dt,
3032 def v4i16 : N3VLIntSL16<op24, 0b01, op11_8, itin,
3033 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
3034 def v2i32 : N3VLIntSL<op24, 0b10, op11_8, itin,
3035 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
3038 // ....then also with element size of 8 bits:
// N3VLInt_QHS: extends N3VLInt_HS with the 8-bit source variant.
3039 multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3040 InstrItinClass itin16, InstrItinClass itin32,
3041 string OpcodeStr, string Dt,
3042 Intrinsic IntOp, bit Commutable = 0>
3043 : N3VLInt_HS<op24, op23, op11_8, op4, itin16, itin32, OpcodeStr, Dt,
3044 IntOp, Commutable> {
3045 def v8i16 : N3VLInt<op24, op23, 0b00, op11_8, op4, itin16,
3046 OpcodeStr, !strconcat(Dt, "8"),
3047 v8i16, v8i8, IntOp, Commutable>;
3050 // ....with explicit extend (VABDL).
// N3VLIntExt_QHS: long intrinsics whose narrow result is widened by ExtOp
// (see N3VLIntExt); all three source element sizes.
3051 multiclass N3VLIntExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3052 InstrItinClass itin, string OpcodeStr, string Dt,
3053 Intrinsic IntOp, SDNode ExtOp, bit Commutable = 0> {
3054 def v8i16 : N3VLIntExt<op24, op23, 0b00, op11_8, op4, itin,
3055 OpcodeStr, !strconcat(Dt, "8"),
3056 v8i16, v8i8, IntOp, ExtOp, Commutable>;
3057 def v4i32 : N3VLIntExt<op24, op23, 0b01, op11_8, op4, itin,
3058 OpcodeStr, !strconcat(Dt, "16"),
3059 v4i32, v4i16, IntOp, ExtOp, Commutable>;
3060 def v2i64 : N3VLIntExt<op24, op23, 0b10, op11_8, op4, itin,
3061 OpcodeStr, !strconcat(Dt, "32"),
3062 v2i64, v2i32, IntOp, ExtOp, Commutable>;
3066 // Neon Wide 3-register vector intrinsics,
3067 // source operand element sizes of 8, 16 and 32 bits:
// N3VW_QHS: wide ops (Q x D -> Q, second operand extended; see N3VW).
3068 multiclass N3VW_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3069 string OpcodeStr, string Dt,
3070 SDNode OpNode, SDNode ExtOp, bit Commutable = 0> {
3071 def v8i16 : N3VW<op24, op23, 0b00, op11_8, op4,
3072 OpcodeStr, !strconcat(Dt, "8"),
3073 v8i16, v8i8, OpNode, ExtOp, Commutable>;
3074 def v4i32 : N3VW<op24, op23, 0b01, op11_8, op4,
3075 OpcodeStr, !strconcat(Dt, "16"),
3076 v4i32, v4i16, OpNode, ExtOp, Commutable>;
3077 def v2i64 : N3VW<op24, op23, 0b10, op11_8, op4,
3078 OpcodeStr, !strconcat(Dt, "32"),
3079 v2i64, v2i32, OpNode, ExtOp, Commutable>;
3083 // Neon Multiply-Op vector operations,
3084 // element sizes of 8, 16 and 32 bits:
// N3VMulOp_QHS: fused multiply-then-OpNode (mul is passed explicitly as the
// inner operation), D and Q forms for 8/16/32-bit elements.
3085 multiclass N3VMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3086 InstrItinClass itinD16, InstrItinClass itinD32,
3087 InstrItinClass itinQ16, InstrItinClass itinQ32,
3088 string OpcodeStr, string Dt, SDNode OpNode> {
3089 // 64-bit vector types.
3090 def v8i8 : N3VDMulOp<op24, op23, 0b00, op11_8, op4, itinD16,
3091 OpcodeStr, !strconcat(Dt, "8"), v8i8, mul, OpNode>;
3092 def v4i16 : N3VDMulOp<op24, op23, 0b01, op11_8, op4, itinD16,
3093 OpcodeStr, !strconcat(Dt, "16"), v4i16, mul, OpNode>;
3094 def v2i32 : N3VDMulOp<op24, op23, 0b10, op11_8, op4, itinD32,
3095 OpcodeStr, !strconcat(Dt, "32"), v2i32, mul, OpNode>;
3097 // 128-bit vector types.
3098 def v16i8 : N3VQMulOp<op24, op23, 0b00, op11_8, op4, itinQ16,
3099 OpcodeStr, !strconcat(Dt, "8"), v16i8, mul, OpNode>;
3100 def v8i16 : N3VQMulOp<op24, op23, 0b01, op11_8, op4, itinQ16,
3101 OpcodeStr, !strconcat(Dt, "16"), v8i16, mul, OpNode>;
3102 def v4i32 : N3VQMulOp<op24, op23, 0b10, op11_8, op4, itinQ32,
3103 OpcodeStr, !strconcat(Dt, "32"), v4i32, mul, OpNode>;
// N3VMulOpSL_HS: scalar-lane multiply-op variants (16/32-bit only).
3106 multiclass N3VMulOpSL_HS<bits<4> op11_8,
3107 InstrItinClass itinD16, InstrItinClass itinD32,
3108 InstrItinClass itinQ16, InstrItinClass itinQ32,
3109 string OpcodeStr, string Dt, SDNode ShOp> {
3110 def v4i16 : N3VDMulOpSL16<0b01, op11_8, itinD16,
3111 OpcodeStr, !strconcat(Dt, "16"), v4i16, mul, ShOp>;
3112 def v2i32 : N3VDMulOpSL<0b10, op11_8, itinD32,
3113 OpcodeStr, !strconcat(Dt, "32"), v2i32, mul, ShOp>;
3114 def v8i16 : N3VQMulOpSL16<0b01, op11_8, itinQ16,
3115 OpcodeStr, !strconcat(Dt, "16"), v8i16, v4i16,
3117 def v4i32 : N3VQMulOpSL<0b10, op11_8, itinQ32,
3118 OpcodeStr, !strconcat(Dt, "32"), v4i32, v2i32,
3122 // Neon Intrinsic-Op vector operations,
3123 // element sizes of 8, 16 and 32 bits:
// N3VIntOp_QHS: intrinsic combined with an outer OpNode, D and Q forms
// for 8/16/32-bit elements.
3124 multiclass N3VIntOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3125 InstrItinClass itinD, InstrItinClass itinQ,
3126 string OpcodeStr, string Dt, Intrinsic IntOp,
3128 // 64-bit vector types.
3129 def v8i8 : N3VDIntOp<op24, op23, 0b00, op11_8, op4, itinD,
3130 OpcodeStr, !strconcat(Dt, "8"), v8i8, IntOp, OpNode>;
3131 def v4i16 : N3VDIntOp<op24, op23, 0b01, op11_8, op4, itinD,
3132 OpcodeStr, !strconcat(Dt, "16"), v4i16, IntOp, OpNode>;
3133 def v2i32 : N3VDIntOp<op24, op23, 0b10, op11_8, op4, itinD,
3134 OpcodeStr, !strconcat(Dt, "32"), v2i32, IntOp, OpNode>;
3136 // 128-bit vector types.
3137 def v16i8 : N3VQIntOp<op24, op23, 0b00, op11_8, op4, itinQ,
3138 OpcodeStr, !strconcat(Dt, "8"), v16i8, IntOp, OpNode>;
3139 def v8i16 : N3VQIntOp<op24, op23, 0b01, op11_8, op4, itinQ,
3140 OpcodeStr, !strconcat(Dt, "16"), v8i16, IntOp, OpNode>;
3141 def v4i32 : N3VQIntOp<op24, op23, 0b10, op11_8, op4, itinQ,
3142 OpcodeStr, !strconcat(Dt, "32"), v4i32, IntOp, OpNode>;
3145 // Neon 3-argument intrinsics,
3146 // element sizes of 8, 16 and 32 bits:
// N3VInt3_QHS: three-operand (accumulating) intrinsics, D and Q forms
// for 8/16/32-bit elements.
3147 multiclass N3VInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3148 InstrItinClass itinD, InstrItinClass itinQ,
3149 string OpcodeStr, string Dt, Intrinsic IntOp> {
3150 // 64-bit vector types.
3151 def v8i8 : N3VDInt3<op24, op23, 0b00, op11_8, op4, itinD,
3152 OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
3153 def v4i16 : N3VDInt3<op24, op23, 0b01, op11_8, op4, itinD,
3154 OpcodeStr, !strconcat(Dt, "16"), v4i16, v4i16, IntOp>;
3155 def v2i32 : N3VDInt3<op24, op23, 0b10, op11_8, op4, itinD,
3156 OpcodeStr, !strconcat(Dt, "32"), v2i32, v2i32, IntOp>;
3158 // 128-bit vector types.
3159 def v16i8 : N3VQInt3<op24, op23, 0b00, op11_8, op4, itinQ,
3160 OpcodeStr, !strconcat(Dt, "8"), v16i8, v16i8, IntOp>;
3161 def v8i16 : N3VQInt3<op24, op23, 0b01, op11_8, op4, itinQ,
3162 OpcodeStr, !strconcat(Dt, "16"), v8i16, v8i16, IntOp>;
3163 def v4i32 : N3VQInt3<op24, op23, 0b10, op11_8, op4, itinQ,
3164 OpcodeStr, !strconcat(Dt, "32"), v4i32, v4i32, IntOp>;
3168 // Neon Long Multiply-Op vector operations,
3169 // element sizes of 8, 16 and 32 bits:
// N3VLMulOp_QHS: long multiply (MulOp) combined with an outer OpNode,
// D x D -> Q for all three source element sizes.
3170 multiclass N3VLMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3171 InstrItinClass itin16, InstrItinClass itin32,
3172 string OpcodeStr, string Dt, SDNode MulOp,
3174 def v8i16 : N3VLMulOp<op24, op23, 0b00, op11_8, op4, itin16, OpcodeStr,
3175 !strconcat(Dt, "8"), v8i16, v8i8, MulOp, OpNode>;
3176 def v4i32 : N3VLMulOp<op24, op23, 0b01, op11_8, op4, itin16, OpcodeStr,
3177 !strconcat(Dt, "16"), v4i32, v4i16, MulOp, OpNode>;
3178 def v2i64 : N3VLMulOp<op24, op23, 0b10, op11_8, op4, itin32, OpcodeStr,
3179 !strconcat(Dt, "32"), v2i64, v2i32, MulOp, OpNode>;
// N3VLMulOpSL_HS: scalar-lane long multiply-op variants (16/32-bit only).
3182 multiclass N3VLMulOpSL_HS<bit op24, bits<4> op11_8, string OpcodeStr,
3183 string Dt, SDNode MulOp, SDNode OpNode> {
3184 def v4i16 : N3VLMulOpSL16<op24, 0b01, op11_8, IIC_VMACi16D, OpcodeStr,
3185 !strconcat(Dt,"16"), v4i32, v4i16, MulOp, OpNode>;
3186 def v2i32 : N3VLMulOpSL<op24, 0b10, op11_8, IIC_VMACi32D, OpcodeStr,
3187 !strconcat(Dt, "32"), v2i64, v2i32, MulOp, OpNode>;
3191 // Neon Long 3-argument intrinsics.
3193 // First with only element sizes of 16 and 32 bits:
// Long 3-operand intrinsic instructions (e.g. VQDMLAL/VQDMLSL): destination
// elements are double the source width.
3194 multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
3195 InstrItinClass itin16, InstrItinClass itin32,
3196 string OpcodeStr, string Dt, Intrinsic IntOp> {
3197 def v4i32 : N3VLInt3<op24, op23, 0b01, op11_8, op4, itin16,
3198 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
3199 def v2i64 : N3VLInt3<op24, op23, 0b10, op11_8, op4, itin32,
3200 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
// Scalar ("by lane") variants; 16/32-bit element sizes only.
3203 multiclass N3VLInt3SL_HS<bit op24, bits<4> op11_8,
3204 string OpcodeStr, string Dt, Intrinsic IntOp> {
3205 def v4i16 : N3VLInt3SL16<op24, 0b01, op11_8, IIC_VMACi16D,
3206 OpcodeStr, !strconcat(Dt,"16"), v4i32, v4i16, IntOp>;
3207 def v2i32 : N3VLInt3SL<op24, 0b10, op11_8, IIC_VMACi32D,
3208 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
3211 // ....then also with element size of 8 bits:
// Extends N3VLInt3_HS (inherits its v4i32/v2i64 defs) and adds the 8-bit
// element-size form.
3212 multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3213 InstrItinClass itin16, InstrItinClass itin32,
3214 string OpcodeStr, string Dt, Intrinsic IntOp>
3215 : N3VLInt3_HS<op24, op23, op11_8, op4, itin16, itin32, OpcodeStr, Dt, IntOp> {
3216 def v8i16 : N3VLInt3<op24, op23, 0b00, op11_8, op4, itin16,
3217 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, IntOp>;
3220 // ....with explicit extend (VABAL).
// Long 3-operand form where the intrinsic result is widened by ExtOp and
// combined with the accumulator via OpNode (used for VABAL).
3221 multiclass N3VLIntExtOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3222 InstrItinClass itin, string OpcodeStr, string Dt,
3223 Intrinsic IntOp, SDNode ExtOp, SDNode OpNode> {
3224 def v8i16 : N3VLIntExtOp<op24, op23, 0b00, op11_8, op4, itin,
3225 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8,
3226 IntOp, ExtOp, OpNode>;
3227 def v4i32 : N3VLIntExtOp<op24, op23, 0b01, op11_8, op4, itin,
3228 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16,
3229 IntOp, ExtOp, OpNode>;
3230 def v2i64 : N3VLIntExtOp<op24, op23, 0b10, op11_8, op4, itin,
3231 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32,
3232 IntOp, ExtOp, OpNode>;
3236 // Neon Pairwise long 2-register intrinsics,
3237 // element sizes of 8, 16 and 32 bits:
// Pairwise add-long (VPADDL-style): adjacent element pairs are combined into
// a single element of twice the width, so the result type has half the lane
// count (e.g. v8i8 -> v4i16).
3238 multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
3239 bits<5> op11_7, bit op4,
3240 string OpcodeStr, string Dt, Intrinsic IntOp> {
3241 // 64-bit vector types.
3242 def v8i8 : N2VDPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
3243 OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
3244 def v4i16 : N2VDPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
3245 OpcodeStr, !strconcat(Dt, "16"), v2i32, v4i16, IntOp>;
3246 def v2i32 : N2VDPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
3247 OpcodeStr, !strconcat(Dt, "32"), v1i64, v2i32, IntOp>;
3249 // 128-bit vector types.
3250 def v16i8 : N2VQPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
3251 OpcodeStr, !strconcat(Dt, "8"), v8i16, v16i8, IntOp>;
3252 def v8i16 : N2VQPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
3253 OpcodeStr, !strconcat(Dt, "16"), v4i32, v8i16, IntOp>;
3254 def v4i32 : N2VQPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
3255 OpcodeStr, !strconcat(Dt, "32"), v2i64, v4i32, IntOp>;
3259 // Neon Pairwise long 2-register accumulate intrinsics,
3260 // element sizes of 8, 16 and 32 bits:
// Same pairwise-long shape as above but accumulating into the destination
// (VPADAL-style); type mapping is identical.
3261 multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
3262 bits<5> op11_7, bit op4,
3263 string OpcodeStr, string Dt, Intrinsic IntOp> {
3264 // 64-bit vector types.
3265 def v8i8 : N2VDPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
3266 OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
3267 def v4i16 : N2VDPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
3268 OpcodeStr, !strconcat(Dt, "16"), v2i32, v4i16, IntOp>;
3269 def v2i32 : N2VDPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
3270 OpcodeStr, !strconcat(Dt, "32"), v1i64, v2i32, IntOp>;
3272 // 128-bit vector types.
3273 def v16i8 : N2VQPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
3274 OpcodeStr, !strconcat(Dt, "8"), v8i16, v16i8, IntOp>;
3275 def v8i16 : N2VQPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
3276 OpcodeStr, !strconcat(Dt, "16"), v4i32, v8i16, IntOp>;
3277 def v4i32 : N2VQPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
3278 OpcodeStr, !strconcat(Dt, "32"), v2i64, v4i32, IntOp>;
3282 // Neon 2-register vector shift by immediate,
3283 // with f of either N2RegVShLFrm or N2RegVShRFrm
3284 // element sizes of 8, 16, 32 and 64 bits:
// Left-shift-by-immediate: each element size constrains the high bits of the
// 6-bit immediate field (imm6) so the encoding identifies the element width;
// the immediate operand class is plain i32imm (shift amount 0..size-1).
3285 multiclass N2VShL_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3286 InstrItinClass itin, string OpcodeStr, string Dt,
3288 // 64-bit vector types.
3289 def v8i8 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3290 OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
3291 let Inst{21-19} = 0b001; // imm6 = 001xxx
3293 def v4i16 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3294 OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
3295 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3297 def v2i32 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3298 OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
3299 let Inst{21} = 0b1; // imm6 = 1xxxxx
3301 def v1i64 : N2VDSh<op24, op23, op11_8, 1, op4, N2RegVShLFrm, itin, i32imm,
3302 OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
3305 // 128-bit vector types.
3306 def v16i8 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3307 OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
3308 let Inst{21-19} = 0b001; // imm6 = 001xxx
3310 def v8i16 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3311 OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
3312 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3314 def v4i32 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3315 OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
3316 let Inst{21} = 0b1; // imm6 = 1xxxxx
3318 def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, N2RegVShLFrm, itin, i32imm,
3319 OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
// Right-shift-by-immediate: identical structure but uses the shr_imm*
// operand classes (shift amount 1..size) and the right-shift format.
3322 multiclass N2VShR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3323 InstrItinClass itin, string OpcodeStr, string Dt,
3325 // 64-bit vector types.
3326 def v8i8 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
3327 OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
3328 let Inst{21-19} = 0b001; // imm6 = 001xxx
3330 def v4i16 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm16,
3331 OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
3332 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3334 def v2i32 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm32,
3335 OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
3336 let Inst{21} = 0b1; // imm6 = 1xxxxx
3338 def v1i64 : N2VDSh<op24, op23, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
3339 OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
3342 // 128-bit vector types.
3343 def v16i8 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
3344 OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
3345 let Inst{21-19} = 0b001; // imm6 = 001xxx
3347 def v8i16 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm16,
3348 OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
3349 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3351 def v4i32 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm32,
3352 OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
3353 let Inst{21} = 0b1; // imm6 = 1xxxxx
3355 def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
3356 OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
3360 // Neon Shift-Accumulate vector operations,
3361 // element sizes of 8, 16, 32 and 64 bits:
// Shift-right-and-accumulate (VSRA/VRSRA style): destination is also a
// source; imm6 high bits encode the element size as in the plain shifts.
3362 multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3363 string OpcodeStr, string Dt, SDNode ShOp> {
3364 // 64-bit vector types.
3365 def v8i8 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm8,
3366 OpcodeStr, !strconcat(Dt, "8"), v8i8, ShOp> {
3367 let Inst{21-19} = 0b001; // imm6 = 001xxx
3369 def v4i16 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm16,
3370 OpcodeStr, !strconcat(Dt, "16"), v4i16, ShOp> {
3371 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3373 def v2i32 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm32,
3374 OpcodeStr, !strconcat(Dt, "32"), v2i32, ShOp> {
3375 let Inst{21} = 0b1; // imm6 = 1xxxxx
3377 def v1i64 : N2VDShAdd<op24, op23, op11_8, 1, op4, shr_imm64,
3378 OpcodeStr, !strconcat(Dt, "64"), v1i64, ShOp>;
3381 // 128-bit vector types.
3382 def v16i8 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm8,
3383 OpcodeStr, !strconcat(Dt, "8"), v16i8, ShOp> {
3384 let Inst{21-19} = 0b001; // imm6 = 001xxx
3386 def v8i16 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm16,
3387 OpcodeStr, !strconcat(Dt, "16"), v8i16, ShOp> {
3388 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3390 def v4i32 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm32,
3391 OpcodeStr, !strconcat(Dt, "32"), v4i32, ShOp> {
3392 let Inst{21} = 0b1; // imm6 = 1xxxxx
3394 def v2i64 : N2VQShAdd<op24, op23, op11_8, 1, op4, shr_imm64,
3395 OpcodeStr, !strconcat(Dt, "64"), v2i64, ShOp>;
3399 // Neon Shift-Insert vector operations,
3400 // with f of either N2RegVShLFrm or N2RegVShRFrm
3401 // element sizes of 8, 16, 32 and 64 bits:
// Shift-left-and-insert (VSLI): shifted-in bits come from the destination;
// uses the NEONvsli node and i32imm (left-shift) immediates.
3402 multiclass N2VShInsL_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3404 // 64-bit vector types.
3405 def v8i8 : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
3406 N2RegVShLFrm, OpcodeStr, "8", v8i8, NEONvsli> {
3407 let Inst{21-19} = 0b001; // imm6 = 001xxx
3409 def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
3410 N2RegVShLFrm, OpcodeStr, "16", v4i16, NEONvsli> {
3411 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3413 def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
3414 N2RegVShLFrm, OpcodeStr, "32", v2i32, NEONvsli> {
3415 let Inst{21} = 0b1; // imm6 = 1xxxxx
3417 def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4, i32imm,
3418 N2RegVShLFrm, OpcodeStr, "64", v1i64, NEONvsli>;
3421 // 128-bit vector types.
3422 def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
3423 N2RegVShLFrm, OpcodeStr, "8", v16i8, NEONvsli> {
3424 let Inst{21-19} = 0b001; // imm6 = 001xxx
3426 def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
3427 N2RegVShLFrm, OpcodeStr, "16", v8i16, NEONvsli> {
3428 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3430 def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
3431 N2RegVShLFrm, OpcodeStr, "32", v4i32, NEONvsli> {
3432 let Inst{21} = 0b1; // imm6 = 1xxxxx
3434 def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4, i32imm,
3435 N2RegVShLFrm, OpcodeStr, "64", v2i64, NEONvsli>;
// Shift-right-and-insert (VSRI): mirror of the above with NEONvsri and the
// shr_imm* right-shift immediate operand classes.
3438 multiclass N2VShInsR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3440 // 64-bit vector types.
3441 def v8i8 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm8,
3442 N2RegVShRFrm, OpcodeStr, "8", v8i8, NEONvsri> {
3443 let Inst{21-19} = 0b001; // imm6 = 001xxx
3445 def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm16,
3446 N2RegVShRFrm, OpcodeStr, "16", v4i16, NEONvsri> {
3447 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3449 def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm32,
3450 N2RegVShRFrm, OpcodeStr, "32", v2i32, NEONvsri> {
3451 let Inst{21} = 0b1; // imm6 = 1xxxxx
3453 def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4, shr_imm64,
3454 N2RegVShRFrm, OpcodeStr, "64", v1i64, NEONvsri>;
3457 // 128-bit vector types.
3458 def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm8,
3459 N2RegVShRFrm, OpcodeStr, "8", v16i8, NEONvsri> {
3460 let Inst{21-19} = 0b001; // imm6 = 001xxx
3462 def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm16,
3463 N2RegVShRFrm, OpcodeStr, "16", v8i16, NEONvsri> {
3464 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3466 def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm32,
3467 N2RegVShRFrm, OpcodeStr, "32", v4i32, NEONvsri> {
3468 let Inst{21} = 0b1; // imm6 = 1xxxxx
3470 def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4, shr_imm64,
3471 N2RegVShRFrm, OpcodeStr, "64", v2i64, NEONvsri>;
3475 // Neon Shift Long operations,
3476 // element sizes of 8, 16, 32 bits:
// Shift-left-long (VSHLL style): widens each element to double width while
// shifting; result names reflect the widened type (v8i16 from v8i8, etc.).
3477 multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
3478 bit op4, string OpcodeStr, string Dt, SDNode OpNode> {
3479 def v8i16 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
3480 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode> {
3481 let Inst{21-19} = 0b001; // imm6 = 001xxx
3483 def v4i32 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
3484 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode> {
3485 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3487 def v2i64 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
3488 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode> {
3489 let Inst{21} = 0b1; // imm6 = 1xxxxx
3493 // Neon Shift Narrow operations,
3494 // element sizes of 16, 32, 64 bits:
// Shift-right-narrow (VSHRN style): halves the element width; the Dt suffix
// names the SOURCE element size (e.g. "16" for the v8i8-result form).
3495 multiclass N2VNSh_HSD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
3496 bit op4, InstrItinClass itin, string OpcodeStr, string Dt,
3498 def v8i8 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
3499 OpcodeStr, !strconcat(Dt, "16"),
3500 v8i8, v8i16, shr_imm8, OpNode> {
3501 let Inst{21-19} = 0b001; // imm6 = 001xxx
3503 def v4i16 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
3504 OpcodeStr, !strconcat(Dt, "32"),
3505 v4i16, v4i32, shr_imm16, OpNode> {
3506 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3508 def v2i32 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
3509 OpcodeStr, !strconcat(Dt, "64"),
3510 v2i32, v2i64, shr_imm32, OpNode> {
3511 let Inst{21} = 0b1; // imm6 = 1xxxxx
3515 //===----------------------------------------------------------------------===//
3516 // Instruction Definitions.
3517 //===----------------------------------------------------------------------===//
3519 // Vector Add Operations.
3521 // VADD : Vector Add (integer and floating-point)
3522 defm VADD : N3V_QHSD<0, 0, 0b1000, 0, IIC_VBINiD, IIC_VBINiQ, "vadd", "i",
// NOTE(review): the continuation line of the VADD defm (orig. 3523, carrying
// the SDNode and commutativity flag) is missing from this chunk — verify.
3524 def VADDfd : N3VD<0, 0, 0b00, 0b1101, 0, IIC_VBIND, "vadd", "f32",
3525 v2f32, v2f32, fadd, 1>;
3526 def VADDfq : N3VQ<0, 0, 0b00, 0b1101, 0, IIC_VBINQ, "vadd", "f32",
3527 v4f32, v4f32, fadd, 1>;
3528 // VADDL : Vector Add Long (Q = D + D)
3529 defm VADDLs : N3VLExt_QHS<0,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
3530 "vaddl", "s", add, sext, 1>;
3531 defm VADDLu : N3VLExt_QHS<1,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
3532 "vaddl", "u", add, zext, 1>;
3533 // VADDW : Vector Add Wide (Q = Q + D)
3534 defm VADDWs : N3VW_QHS<0,1,0b0001,0, "vaddw", "s", add, sext, 0>;
3535 defm VADDWu : N3VW_QHS<1,1,0b0001,0, "vaddw", "u", add, zext, 0>;
3536 // VHADD : Vector Halving Add
3537 defm VHADDs : N3VInt_QHS<0, 0, 0b0000, 0, N3RegFrm,
3538 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3539 "vhadd", "s", int_arm_neon_vhadds, 1>;
3540 defm VHADDu : N3VInt_QHS<1, 0, 0b0000, 0, N3RegFrm,
3541 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3542 "vhadd", "u", int_arm_neon_vhaddu, 1>;
3543 // VRHADD : Vector Rounding Halving Add
3544 defm VRHADDs : N3VInt_QHS<0, 0, 0b0001, 0, N3RegFrm,
3545 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3546 "vrhadd", "s", int_arm_neon_vrhadds, 1>;
3547 defm VRHADDu : N3VInt_QHS<1, 0, 0b0001, 0, N3RegFrm,
3548 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3549 "vrhadd", "u", int_arm_neon_vrhaddu, 1>;
3550 // VQADD : Vector Saturating Add
3551 defm VQADDs : N3VInt_QHSD<0, 0, 0b0000, 1, N3RegFrm,
3552 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3553 "vqadd", "s", int_arm_neon_vqadds, 1>;
3554 defm VQADDu : N3VInt_QHSD<1, 0, 0b0000, 1, N3RegFrm,
3555 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3556 "vqadd", "u", int_arm_neon_vqaddu, 1>;
3557 // VADDHN : Vector Add and Narrow Returning High Half (D = Q + Q)
3558 defm VADDHN : N3VNInt_HSD<0,1,0b0100,0, "vaddhn", "i",
3559 int_arm_neon_vaddhn, 1>;
3560 // VRADDHN : Vector Rounding Add and Narrow Returning High Half (D = Q + Q)
3561 defm VRADDHN : N3VNInt_HSD<1,1,0b0100,0, "vraddhn", "i",
3562 int_arm_neon_vraddhn, 1>;
3564 // Vector Multiply Operations.
3566 // VMUL : Vector Multiply (integer, polynomial and floating-point)
3567 defm VMUL : N3V_QHS<0, 0, 0b1001, 1, IIC_VMULi16D, IIC_VMULi32D,
3568 IIC_VMULi16Q, IIC_VMULi32Q, "vmul", "i", mul, 1>;
3569 def VMULpd : N3VDInt<1, 0, 0b00, 0b1001, 1, N3RegFrm, IIC_VMULi16D, "vmul",
3570 "p8", v8i8, v8i8, int_arm_neon_vmulp, 1>;
3571 def VMULpq : N3VQInt<1, 0, 0b00, 0b1001, 1, N3RegFrm, IIC_VMULi16Q, "vmul",
3572 "p8", v16i8, v16i8, int_arm_neon_vmulp, 1>;
3573 def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, IIC_VFMULD, "vmul", "f32",
3574 v2f32, v2f32, fmul, 1>;
3575 def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, IIC_VFMULQ, "vmul", "f32",
3576 v4f32, v4f32, fmul, 1>;
3577 defm VMULsl : N3VSL_HS<0b1000, "vmul", "i", mul>;
3578 def VMULslfd : N3VDSL<0b10, 0b1001, IIC_VBIND, "vmul", "f32", v2f32, fmul>;
3579 def VMULslfq : N3VQSL<0b10, 0b1001, IIC_VBINQ, "vmul", "f32", v4f32,
// Patterns: fold a multiply by a duplicated Q-register lane into the
// by-scalar (sl) multiply on the containing D subregister.
3582 def : Pat<(v8i16 (mul (v8i16 QPR:$src1),
3583 (v8i16 (NEONvduplane (v8i16 QPR:$src2), imm:$lane)))),
3584 (v8i16 (VMULslv8i16 (v8i16 QPR:$src1),
3585 (v4i16 (EXTRACT_SUBREG QPR:$src2,
3586 (DSubReg_i16_reg imm:$lane))),
3587 (SubReg_i16_lane imm:$lane)))>;
3588 def : Pat<(v4i32 (mul (v4i32 QPR:$src1),
3589 (v4i32 (NEONvduplane (v4i32 QPR:$src2), imm:$lane)))),
3590 (v4i32 (VMULslv4i32 (v4i32 QPR:$src1),
3591 (v2i32 (EXTRACT_SUBREG QPR:$src2,
3592 (DSubReg_i32_reg imm:$lane))),
3593 (SubReg_i32_lane imm:$lane)))>;
3594 def : Pat<(v4f32 (fmul (v4f32 QPR:$src1),
3595 (v4f32 (NEONvduplane (v4f32 QPR:$src2), imm:$lane)))),
3596 (v4f32 (VMULslfq (v4f32 QPR:$src1),
3597 (v2f32 (EXTRACT_SUBREG QPR:$src2,
3598 (DSubReg_i32_reg imm:$lane))),
3599 (SubReg_i32_lane imm:$lane)))>;
3601 // VQDMULH : Vector Saturating Doubling Multiply Returning High Half
3602 defm VQDMULH : N3VInt_HS<0, 0, 0b1011, 0, N3RegFrm, IIC_VMULi16D, IIC_VMULi32D,
3603 IIC_VMULi16Q, IIC_VMULi32Q,
3604 "vqdmulh", "s", int_arm_neon_vqdmulh, 1>;
3605 defm VQDMULHsl: N3VIntSL_HS<0b1100, IIC_VMULi16D, IIC_VMULi32D,
3606 IIC_VMULi16Q, IIC_VMULi32Q,
3607 "vqdmulh", "s", int_arm_neon_vqdmulh>;
// Same duplicated-lane folding for VQDMULH.
3608 def : Pat<(v8i16 (int_arm_neon_vqdmulh (v8i16 QPR:$src1),
3609 (v8i16 (NEONvduplane (v8i16 QPR:$src2),
3611 (v8i16 (VQDMULHslv8i16 (v8i16 QPR:$src1),
3612 (v4i16 (EXTRACT_SUBREG QPR:$src2,
3613 (DSubReg_i16_reg imm:$lane))),
3614 (SubReg_i16_lane imm:$lane)))>;
3615 def : Pat<(v4i32 (int_arm_neon_vqdmulh (v4i32 QPR:$src1),
3616 (v4i32 (NEONvduplane (v4i32 QPR:$src2),
3618 (v4i32 (VQDMULHslv4i32 (v4i32 QPR:$src1),
3619 (v2i32 (EXTRACT_SUBREG QPR:$src2,
3620 (DSubReg_i32_reg imm:$lane))),
3621 (SubReg_i32_lane imm:$lane)))>;
3623 // VQRDMULH : Vector Rounding Saturating Doubling Multiply Returning High Half
3624 defm VQRDMULH : N3VInt_HS<1, 0, 0b1011, 0, N3RegFrm,
3625 IIC_VMULi16D,IIC_VMULi32D,IIC_VMULi16Q,IIC_VMULi32Q,
3626 "vqrdmulh", "s", int_arm_neon_vqrdmulh, 1>;
3627 defm VQRDMULHsl : N3VIntSL_HS<0b1101, IIC_VMULi16D, IIC_VMULi32D,
3628 IIC_VMULi16Q, IIC_VMULi32Q,
3629 "vqrdmulh", "s", int_arm_neon_vqrdmulh>;
// Same duplicated-lane folding for VQRDMULH.
3630 def : Pat<(v8i16 (int_arm_neon_vqrdmulh (v8i16 QPR:$src1),
3631 (v8i16 (NEONvduplane (v8i16 QPR:$src2),
3633 (v8i16 (VQRDMULHslv8i16 (v8i16 QPR:$src1),
3634 (v4i16 (EXTRACT_SUBREG QPR:$src2,
3635 (DSubReg_i16_reg imm:$lane))),
3636 (SubReg_i16_lane imm:$lane)))>;
3637 def : Pat<(v4i32 (int_arm_neon_vqrdmulh (v4i32 QPR:$src1),
3638 (v4i32 (NEONvduplane (v4i32 QPR:$src2),
3640 (v4i32 (VQRDMULHslv4i32 (v4i32 QPR:$src1),
3641 (v2i32 (EXTRACT_SUBREG QPR:$src2,
3642 (DSubReg_i32_reg imm:$lane))),
3643 (SubReg_i32_lane imm:$lane)))>;
3645 // VMULL : Vector Multiply Long (integer and polynomial) (Q = D * D)
3646 defm VMULLs : N3VL_QHS<0,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
3647 "vmull", "s", NEONvmulls, 1>;
3648 defm VMULLu : N3VL_QHS<1,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
3649 "vmull", "u", NEONvmullu, 1>;
3650 def VMULLp : N3VLInt<0, 1, 0b00, 0b1110, 0, IIC_VMULi16D, "vmull", "p8",
3651 v8i16, v8i8, int_arm_neon_vmullp, 1>;
3652 defm VMULLsls : N3VLSL_HS<0, 0b1010, IIC_VMULi16D, "vmull", "s", NEONvmulls>;
3653 defm VMULLslu : N3VLSL_HS<1, 0b1010, IIC_VMULi16D, "vmull", "u", NEONvmullu>;
3655 // VQDMULL : Vector Saturating Doubling Multiply Long (Q = D * D)
3656 defm VQDMULL : N3VLInt_HS<0,1,0b1101,0, IIC_VMULi16D, IIC_VMULi32D,
3657 "vqdmull", "s", int_arm_neon_vqdmull, 1>;
3658 defm VQDMULLsl: N3VLIntSL_HS<0, 0b1011, IIC_VMULi16D,
3659 "vqdmull", "s", int_arm_neon_vqdmull>;
3661 // Vector Multiply-Accumulate and Multiply-Subtract Operations.
3663 // VMLA : Vector Multiply Accumulate (integer and floating-point)
3664 defm VMLA : N3VMulOp_QHS<0, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
3665 IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
// FP VMLA forms are gated on UseFPVMLx (mul+add fusion only when profitable).
3666 def VMLAfd : N3VDMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACD, "vmla", "f32",
3667 v2f32, fmul_su, fadd_mlx>,
3668 Requires<[HasNEON, UseFPVMLx]>;
3669 def VMLAfq : N3VQMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACQ, "vmla", "f32",
3670 v4f32, fmul_su, fadd_mlx>,
3671 Requires<[HasNEON, UseFPVMLx]>;
3672 defm VMLAsl : N3VMulOpSL_HS<0b0000, IIC_VMACi16D, IIC_VMACi32D,
3673 IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
3674 def VMLAslfd : N3VDMulOpSL<0b10, 0b0001, IIC_VMACD, "vmla", "f32",
3675 v2f32, fmul_su, fadd_mlx>,
3676 Requires<[HasNEON, UseFPVMLx]>;
3677 def VMLAslfq : N3VQMulOpSL<0b10, 0b0001, IIC_VMACQ, "vmla", "f32",
3678 v4f32, v2f32, fmul_su, fadd_mlx>,
3679 Requires<[HasNEON, UseFPVMLx]>;
// Patterns: fold multiply-by-duplicated-lane + accumulate into by-scalar VMLA.
3681 def : Pat<(v8i16 (add (v8i16 QPR:$src1),
3682 (mul (v8i16 QPR:$src2),
3683 (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
3684 (v8i16 (VMLAslv8i16 (v8i16 QPR:$src1), (v8i16 QPR:$src2),
3685 (v4i16 (EXTRACT_SUBREG QPR:$src3,
3686 (DSubReg_i16_reg imm:$lane))),
3687 (SubReg_i16_lane imm:$lane)))>;
3689 def : Pat<(v4i32 (add (v4i32 QPR:$src1),
3690 (mul (v4i32 QPR:$src2),
3691 (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
3692 (v4i32 (VMLAslv4i32 (v4i32 QPR:$src1), (v4i32 QPR:$src2),
3693 (v2i32 (EXTRACT_SUBREG QPR:$src3,
3694 (DSubReg_i32_reg imm:$lane))),
3695 (SubReg_i32_lane imm:$lane)))>;
3697 def : Pat<(v4f32 (fadd_mlx (v4f32 QPR:$src1),
3698 (fmul_su (v4f32 QPR:$src2),
3699 (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
3700 (v4f32 (VMLAslfq (v4f32 QPR:$src1),
// NOTE(review): an operand line of this pattern (orig. 3701, the $src2
// operand) appears to be missing from this chunk — verify against upstream.
3702 (v2f32 (EXTRACT_SUBREG QPR:$src3,
3703 (DSubReg_i32_reg imm:$lane))),
3704 (SubReg_i32_lane imm:$lane)))>,
3705 Requires<[HasNEON, UseFPVMLx]>;
3707 // VMLAL : Vector Multiply Accumulate Long (Q += D * D)
3708 defm VMLALs : N3VLMulOp_QHS<0,1,0b1000,0, IIC_VMACi16D, IIC_VMACi32D,
3709 "vmlal", "s", NEONvmulls, add>;
3710 defm VMLALu : N3VLMulOp_QHS<1,1,0b1000,0, IIC_VMACi16D, IIC_VMACi32D,
3711 "vmlal", "u", NEONvmullu, add>;
3713 defm VMLALsls : N3VLMulOpSL_HS<0, 0b0010, "vmlal", "s", NEONvmulls, add>;
3714 defm VMLALslu : N3VLMulOpSL_HS<1, 0b0010, "vmlal", "u", NEONvmullu, add>;
3716 // VQDMLAL : Vector Saturating Doubling Multiply Accumulate Long (Q += D * D)
3717 defm VQDMLAL : N3VLInt3_HS<0, 1, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
3718 "vqdmlal", "s", int_arm_neon_vqdmlal>;
3719 defm VQDMLALsl: N3VLInt3SL_HS<0, 0b0011, "vqdmlal", "s", int_arm_neon_vqdmlal>;
3721 // VMLS : Vector Multiply Subtract (integer and floating-point)
3722 defm VMLS : N3VMulOp_QHS<1, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
3723 IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
3724 def VMLSfd : N3VDMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACD, "vmls", "f32",
3725 v2f32, fmul_su, fsub_mlx>,
3726 Requires<[HasNEON, UseFPVMLx]>;
3727 def VMLSfq : N3VQMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACQ, "vmls", "f32",
3728 v4f32, fmul_su, fsub_mlx>,
3729 Requires<[HasNEON, UseFPVMLx]>;
3730 defm VMLSsl : N3VMulOpSL_HS<0b0100, IIC_VMACi16D, IIC_VMACi32D,
3731 IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
3732 def VMLSslfd : N3VDMulOpSL<0b10, 0b0101, IIC_VMACD, "vmls", "f32",
3733 v2f32, fmul_su, fsub_mlx>,
3734 Requires<[HasNEON, UseFPVMLx]>;
3735 def VMLSslfq : N3VQMulOpSL<0b10, 0b0101, IIC_VMACQ, "vmls", "f32",
3736 v4f32, v2f32, fmul_su, fsub_mlx>,
3737 Requires<[HasNEON, UseFPVMLx]>;
// Patterns: fold multiply-by-duplicated-lane + subtract into by-scalar VMLS.
3739 def : Pat<(v8i16 (sub (v8i16 QPR:$src1),
3740 (mul (v8i16 QPR:$src2),
3741 (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
3742 (v8i16 (VMLSslv8i16 (v8i16 QPR:$src1), (v8i16 QPR:$src2),
3743 (v4i16 (EXTRACT_SUBREG QPR:$src3,
3744 (DSubReg_i16_reg imm:$lane))),
3745 (SubReg_i16_lane imm:$lane)))>;
3747 def : Pat<(v4i32 (sub (v4i32 QPR:$src1),
3748 (mul (v4i32 QPR:$src2),
3749 (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
3750 (v4i32 (VMLSslv4i32 (v4i32 QPR:$src1), (v4i32 QPR:$src2),
3751 (v2i32 (EXTRACT_SUBREG QPR:$src3,
3752 (DSubReg_i32_reg imm:$lane))),
3753 (SubReg_i32_lane imm:$lane)))>;
3755 def : Pat<(v4f32 (fsub_mlx (v4f32 QPR:$src1),
3756 (fmul_su (v4f32 QPR:$src2),
3757 (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
3758 (v4f32 (VMLSslfq (v4f32 QPR:$src1), (v4f32 QPR:$src2),
3759 (v2f32 (EXTRACT_SUBREG QPR:$src3,
3760 (DSubReg_i32_reg imm:$lane))),
3761 (SubReg_i32_lane imm:$lane)))>,
3762 Requires<[HasNEON, UseFPVMLx]>;
3764 // VMLSL : Vector Multiply Subtract Long (Q -= D * D)
3765 defm VMLSLs : N3VLMulOp_QHS<0,1,0b1010,0, IIC_VMACi16D, IIC_VMACi32D,
3766 "vmlsl", "s", NEONvmulls, sub>;
3767 defm VMLSLu : N3VLMulOp_QHS<1,1,0b1010,0, IIC_VMACi16D, IIC_VMACi32D,
3768 "vmlsl", "u", NEONvmullu, sub>;
3770 defm VMLSLsls : N3VLMulOpSL_HS<0, 0b0110, "vmlsl", "s", NEONvmulls, sub>;
3771 defm VMLSLslu : N3VLMulOpSL_HS<1, 0b0110, "vmlsl", "u", NEONvmullu, sub>;
3773 // VQDMLSL : Vector Saturating Doubling Multiply Subtract Long (Q -= D * D)
3774 defm VQDMLSL : N3VLInt3_HS<0, 1, 0b1011, 0, IIC_VMACi16D, IIC_VMACi32D,
3775 "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
// By-scalar form. op11_8 is a bits<4> field: spell all four bits (0b0111)
// for consistency with the sibling defms (VQDMLALsl 0b0011, VMLSLsls 0b0110).
// TableGen zero-extends 0b111 to 0b0111, so the encoded value is unchanged.
3776 defm VQDMLSLsl: N3VLInt3SL_HS<0, 0b0111, "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
3778 // Vector Subtract Operations.
3780 // VSUB : Vector Subtract (integer and floating-point)
3781 defm VSUB : N3V_QHSD<1, 0, 0b1000, 0, IIC_VSUBiD, IIC_VSUBiQ,
3782 "vsub", "i", sub, 0>;
3783 def VSUBfd : N3VD<0, 0, 0b10, 0b1101, 0, IIC_VBIND, "vsub", "f32",
3784 v2f32, v2f32, fsub, 0>;
3785 def VSUBfq : N3VQ<0, 0, 0b10, 0b1101, 0, IIC_VBINQ, "vsub", "f32",
3786 v4f32, v4f32, fsub, 0>;
3787 // VSUBL : Vector Subtract Long (Q = D - D)
3788 defm VSUBLs : N3VLExt_QHS<0,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
3789 "vsubl", "s", sub, sext, 0>;
3790 defm VSUBLu : N3VLExt_QHS<1,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
3791 "vsubl", "u", sub, zext, 0>;
3792 // VSUBW : Vector Subtract Wide (Q = Q - D)
3793 defm VSUBWs : N3VW_QHS<0,1,0b0011,0, "vsubw", "s", sub, sext, 0>;
3794 defm VSUBWu : N3VW_QHS<1,1,0b0011,0, "vsubw", "u", sub, zext, 0>;
3795 // VHSUB : Vector Halving Subtract
3796 defm VHSUBs : N3VInt_QHS<0, 0, 0b0010, 0, N3RegFrm,
3797 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
3798 "vhsub", "s", int_arm_neon_vhsubs, 0>;
3799 defm VHSUBu : N3VInt_QHS<1, 0, 0b0010, 0, N3RegFrm,
3800 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
3801 "vhsub", "u", int_arm_neon_vhsubu, 0>;
3802 // VQSUB : Vector Saturing Subtract
3803 defm VQSUBs : N3VInt_QHSD<0, 0, 0b0010, 1, N3RegFrm,
3804 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
3805 "vqsub", "s", int_arm_neon_vqsubs, 0>;
3806 defm VQSUBu : N3VInt_QHSD<1, 0, 0b0010, 1, N3RegFrm,
3807 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
3808 "vqsub", "u", int_arm_neon_vqsubu, 0>;
3809 // VSUBHN : Vector Subtract and Narrow Returning High Half (D = Q - Q)
3810 defm VSUBHN : N3VNInt_HSD<0,1,0b0110,0, "vsubhn", "i",
3811 int_arm_neon_vsubhn, 0>;
3812 // VRSUBHN : Vector Rounding Subtract and Narrow Returning High Half (D=Q-Q)
3813 defm VRSUBHN : N3VNInt_HSD<1,1,0b0110,0, "vrsubhn", "i",
3814 int_arm_neon_vrsubhn, 0>;
3816 // Vector Comparisons.
// Comparisons produce all-ones/all-zeros integer lane masks; FP compares
// therefore use integer result types (v2i32/v4i32) with f32 sources.
3818 // VCEQ : Vector Compare Equal
3819 defm VCEQ : N3V_QHS<1, 0, 0b1000, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3820 IIC_VSUBi4Q, "vceq", "i", NEONvceq, 1>;
3821 def VCEQfd : N3VD<0,0,0b00,0b1110,0, IIC_VBIND, "vceq", "f32", v2i32, v2f32,
3823 def VCEQfq : N3VQ<0,0,0b00,0b1110,0, IIC_VBINQ, "vceq", "f32", v4i32, v4f32,
// Compare-against-zero forms (e.g. "vceq.i8 Dd, Dm, #0").
3826 defm VCEQz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00010, 0, "vceq", "i",
3827 "$Vd, $Vm, #0", NEONvceqz>;
3829 // VCGE : Vector Compare Greater Than or Equal
3830 defm VCGEs : N3V_QHS<0, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3831 IIC_VSUBi4Q, "vcge", "s", NEONvcge, 0>;
3832 defm VCGEu : N3V_QHS<1, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3833 IIC_VSUBi4Q, "vcge", "u", NEONvcgeu, 0>;
3834 def VCGEfd : N3VD<1,0,0b00,0b1110,0, IIC_VBIND, "vcge", "f32", v2i32, v2f32,
3836 def VCGEfq : N3VQ<1,0,0b00,0b1110,0, IIC_VBINQ, "vcge", "f32", v4i32, v4f32,
3839 defm VCGEz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00001, 0, "vcge", "s",
3840 "$Vd, $Vm, #0", NEONvcgez>;
3841 defm VCLEz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00011, 0, "vcle", "s",
3842 "$Vd, $Vm, #0", NEONvclez>;
3844 // VCGT : Vector Compare Greater Than
3845 defm VCGTs : N3V_QHS<0, 0, 0b0011, 0, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3846 IIC_VSUBi4Q, "vcgt", "s", NEONvcgt, 0>;
3847 defm VCGTu : N3V_QHS<1, 0, 0b0011, 0, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3848 IIC_VSUBi4Q, "vcgt", "u", NEONvcgtu, 0>;
3849 def VCGTfd : N3VD<1,0,0b10,0b1110,0, IIC_VBIND, "vcgt", "f32", v2i32, v2f32,
3851 def VCGTfq : N3VQ<1,0,0b10,0b1110,0, IIC_VBINQ, "vcgt", "f32", v4i32, v4f32,
3854 defm VCGTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00000, 0, "vcgt", "s",
3855 "$Vd, $Vm, #0", NEONvcgtz>;
3856 defm VCLTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00100, 0, "vclt", "s",
3857 "$Vd, $Vm, #0", NEONvcltz>;
3859 // VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
3860 def VACGEd : N3VDInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacge",
3861 "f32", v2i32, v2f32, int_arm_neon_vacged, 0>;
3862 def VACGEq : N3VQInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacge",
3863 "f32", v4i32, v4f32, int_arm_neon_vacgeq, 0>;
3864 // VACGT : Vector Absolute Compare Greater Than (aka VCAGT)
3865 def VACGTd : N3VDInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacgt",
3866 "f32", v2i32, v2f32, int_arm_neon_vacgtd, 0>;
3867 def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacgt",
3868 "f32", v4i32, v4f32, int_arm_neon_vacgtq, 0>;
3869 // VTST : Vector Test Bits
3870 defm VTST : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
3871 IIC_VBINi4Q, "vtst", "", NEONvtst, 1>;
3873 // Vector Bitwise Operations.
3875 def vnotd : PatFrag<(ops node:$in),
3876 (xor node:$in, (bitconvert (v8i8 NEONimmAllOnesV)))>;
3877 def vnotq : PatFrag<(ops node:$in),
3878 (xor node:$in, (bitconvert (v16i8 NEONimmAllOnesV)))>;
3881 // VAND : Vector Bitwise AND
3882 def VANDd : N3VDX<0, 0, 0b00, 0b0001, 1, IIC_VBINiD, "vand",
3883 v2i32, v2i32, and, 1>;
3884 def VANDq : N3VQX<0, 0, 0b00, 0b0001, 1, IIC_VBINiQ, "vand",
3885 v4i32, v4i32, and, 1>;
3887 // VEOR : Vector Bitwise Exclusive OR
3888 def VEORd : N3VDX<1, 0, 0b00, 0b0001, 1, IIC_VBINiD, "veor",
3889 v2i32, v2i32, xor, 1>;
3890 def VEORq : N3VQX<1, 0, 0b00, 0b0001, 1, IIC_VBINiQ, "veor",
3891 v4i32, v4i32, xor, 1>;
3893 // VORR : Vector Bitwise OR
3894 def VORRd : N3VDX<0, 0, 0b10, 0b0001, 1, IIC_VBINiD, "vorr",
3895 v2i32, v2i32, or, 1>;
3896 def VORRq : N3VQX<0, 0, 0b10, 0b0001, 1, IIC_VBINiQ, "vorr",
3897 v4i32, v4i32, or, 1>;
3899 def VORRiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 0, 1,
3900 (outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
3902 "vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
3904 (v4i16 (NEONvorrImm DPR:$src, timm:$SIMM)))]> {
3905 let Inst{9} = SIMM{9};
3908 def VORRiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 0, 1,
3909 (outs DPR:$Vd), (ins nImmSplatI32:$SIMM, DPR:$src),
3911 "vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
3913 (v2i32 (NEONvorrImm DPR:$src, timm:$SIMM)))]> {
3914 let Inst{10-9} = SIMM{10-9};
3917 def VORRiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 0, 1,
3918 (outs QPR:$Vd), (ins nImmSplatI16:$SIMM, QPR:$src),
3920 "vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
3922 (v8i16 (NEONvorrImm QPR:$src, timm:$SIMM)))]> {
3923 let Inst{9} = SIMM{9};
3926 def VORRiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 0, 1,
3927 (outs QPR:$Vd), (ins nImmSplatI32:$SIMM, QPR:$src),
3929 "vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
3931 (v4i32 (NEONvorrImm QPR:$src, timm:$SIMM)))]> {
3932 let Inst{10-9} = SIMM{10-9};
3936 // VBIC : Vector Bitwise Bit Clear (AND NOT)
// Matched from (and x, (not y)) using the vnotd/vnotq fragments.
3937 def VBICd : N3VX<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
3938 (ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
3939 "vbic", "$Vd, $Vn, $Vm", "",
3940 [(set DPR:$Vd, (v2i32 (and DPR:$Vn,
3941 (vnotd DPR:$Vm))))]>;
3942 def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
3943 (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINiQ,
3944 "vbic", "$Vd, $Vn, $Vm", "",
3945 [(set QPR:$Vd, (v4i32 (and QPR:$Vn,
3946 (vnotq QPR:$Vm))))]>;
3948 def VBICiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 1, 1,
3949 (outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
3951 "vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
3953 (v4i16 (NEONvbicImm DPR:$src, timm:$SIMM)))]> {
3954 let Inst{9} = SIMM{9};
3957 def VBICiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 1, 1,
3958 (outs DPR:$Vd), (ins nImmSplatI32:$SIMM, DPR:$src),
3960 "vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
3962 (v2i32 (NEONvbicImm DPR:$src, timm:$SIMM)))]> {
3963 let Inst{10-9} = SIMM{10-9};
3966 def VBICiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 1, 1,
3967 (outs QPR:$Vd), (ins nImmSplatI16:$SIMM, QPR:$src),
3969 "vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
3971 (v8i16 (NEONvbicImm QPR:$src, timm:$SIMM)))]> {
3972 let Inst{9} = SIMM{9};
3975 def VBICiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 1, 1,
3976 (outs QPR:$Vd), (ins nImmSplatI32:$SIMM, QPR:$src),
3978 "vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
3980 (v4i32 (NEONvbicImm QPR:$src, timm:$SIMM)))]> {
3981 let Inst{10-9} = SIMM{10-9};
3984 // VORN : Vector Bitwise OR NOT
// Matched from (or x, (not y)); second operand is complemented.
3985 def VORNd : N3VX<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$Vd),
3986 (ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
3987 "vorn", "$Vd, $Vn, $Vm", "",
3988 [(set DPR:$Vd, (v2i32 (or DPR:$Vn,
3989 (vnotd DPR:$Vm))))]>;
3990 def VORNq : N3VX<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$Vd),
3991 (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINiQ,
3992 "vorn", "$Vd, $Vn, $Vm", "",
3993 [(set QPR:$Vd, (v4i32 (or QPR:$Vn,
3994 (vnotq QPR:$Vm))))]>;
3996 // VMVN : Vector Bitwise NOT (Immediate)
3998 let isReMaterializable = 1 in {
4000 def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$Vd),
4001 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4002 "vmvn", "i16", "$Vd, $SIMM", "",
4003 [(set DPR:$Vd, (v4i16 (NEONvmvnImm timm:$SIMM)))]> {
4004 let Inst{9} = SIMM{9};
4007 def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$Vd),
4008 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4009 "vmvn", "i16", "$Vd, $SIMM", "",
4010 [(set QPR:$Vd, (v8i16 (NEONvmvnImm timm:$SIMM)))]> {
4011 let Inst{9} = SIMM{9};
4014 def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$Vd),
4015 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4016 "vmvn", "i32", "$Vd, $SIMM", "",
4017 [(set DPR:$Vd, (v2i32 (NEONvmvnImm timm:$SIMM)))]> {
4018 let Inst{11-8} = SIMM{11-8};
4021 def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$Vd),
4022 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4023 "vmvn", "i32", "$Vd, $SIMM", "",
4024 [(set QPR:$Vd, (v4i32 (NEONvmvnImm timm:$SIMM)))]> {
4025 let Inst{11-8} = SIMM{11-8};
4029 // VMVN : Vector Bitwise NOT
4030 def VMVNd : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
4031 (outs DPR:$Vd), (ins DPR:$Vm), IIC_VSUBiD,
4032 "vmvn", "$Vd, $Vm", "",
4033 [(set DPR:$Vd, (v2i32 (vnotd DPR:$Vm)))]>;
4034 def VMVNq : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
4035 (outs QPR:$Vd), (ins QPR:$Vm), IIC_VSUBiD,
4036 "vmvn", "$Vd, $Vm", "",
4037 [(set QPR:$Vd, (v4i32 (vnotq QPR:$Vm)))]>;
// Redundant with the instruction patterns above; presumably kept for direct
// selection of the vnot fragments — NOTE(review): confirm these are needed.
4038 def : Pat<(v2i32 (vnotd DPR:$src)), (VMVNd DPR:$src)>;
4039 def : Pat<(v4i32 (vnotq QPR:$src)), (VMVNq QPR:$src)>;
4041 // VBSL : Vector Bitwise Select
4042 def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
4043 (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
4044 N3RegFrm, IIC_VCNTiD,
4045 "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4047 (v2i32 (NEONvbsl DPR:$src1, DPR:$Vn, DPR:$Vm)))]>;
// Select (Vd & Vn) | (~Vd & Vm) — the expanded form of a bitwise select —
// into VBSLd, with the mask register as the tied first operand.
4049 def : Pat<(v2i32 (or (and DPR:$Vn, DPR:$Vd),
4050 (and DPR:$Vm, (vnotd DPR:$Vd)))),
4051 (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>;
4053 def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
4054 (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4055 N3RegFrm, IIC_VCNTiQ,
4056 "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4058 (v4i32 (NEONvbsl QPR:$src1, QPR:$Vn, QPR:$Vm)))]>;
// 128-bit analogue: (Vd & Vn) | (~Vd & Vm) selected into VBSLq.
4060 def : Pat<(v4i32 (or (and QPR:$Vn, QPR:$Vd),
4061 (and QPR:$Vm, (vnotq QPR:$Vd)))),
4062 (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>;
4064 // VBIF : Vector Bitwise Insert if False
4065 // like VBSL but with: "vbif $dst, $src3, $src1", "$src2 = $dst",
4066 // FIXME: This instruction's encoding MAY NOT BE correct.
4067 def VBIFd : N3VX<1, 0, 0b11, 0b0001, 0, 1,
4068 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
4069 N3RegFrm, IIC_VBINiD,
4070 "vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4072 def VBIFq : N3VX<1, 0, 0b11, 0b0001, 1, 1,
4073 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4074 N3RegFrm, IIC_VBINiQ,
4075 "vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4078 // VBIT : Vector Bitwise Insert if True
4079 // like VBSL but with: "vbit $dst, $src2, $src1", "$src3 = $dst",
4080 // FIXME: This instruction's encoding MAY NOT BE correct.
4081 def VBITd : N3VX<1, 0, 0b10, 0b0001, 0, 1,
4082 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
4083 N3RegFrm, IIC_VBINiD,
4084 "vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4086 def VBITq : N3VX<1, 0, 0b10, 0b0001, 1, 1,
4087 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4088 N3RegFrm, IIC_VBINiQ,
4089 "vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4092 // VBIT/VBIF are not yet implemented. The TwoAddress pass will not go looking
4093 // for equivalent operations with different register constraints; it just
4096 // Vector Absolute Differences.
4098 // VABD : Vector Absolute Difference
4099 defm VABDs : N3VInt_QHS<0, 0, 0b0111, 0, N3RegFrm,
4100 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4101 "vabd", "s", int_arm_neon_vabds, 1>;
4102 defm VABDu : N3VInt_QHS<1, 0, 0b0111, 0, N3RegFrm,
4103 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4104 "vabd", "u", int_arm_neon_vabdu, 1>;
// The floating-point forms reuse the "signed" intrinsic; the f32 type
// arguments disambiguate them from the integer variants.
4105 def VABDfd : N3VDInt<1, 0, 0b10, 0b1101, 0, N3RegFrm, IIC_VBIND,
4106 "vabd", "f32", v2f32, v2f32, int_arm_neon_vabds, 1>;
4107 def VABDfq : N3VQInt<1, 0, 0b10, 0b1101, 0, N3RegFrm, IIC_VBINQ,
4108 "vabd", "f32", v4f32, v4f32, int_arm_neon_vabds, 1>;
4110 // VABDL : Vector Absolute Difference Long (Q = | D - D |)
4111 defm VABDLs : N3VLIntExt_QHS<0,1,0b0111,0, IIC_VSUBi4Q,
4112 "vabdl", "s", int_arm_neon_vabds, zext, 1>;
4113 defm VABDLu : N3VLIntExt_QHS<1,1,0b0111,0, IIC_VSUBi4Q,
4114 "vabdl", "u", int_arm_neon_vabdu, zext, 1>;
4116 // VABA : Vector Absolute Difference and Accumulate
4117 defm VABAs : N3VIntOp_QHS<0,0,0b0111,1, IIC_VABAD, IIC_VABAQ,
4118 "vaba", "s", int_arm_neon_vabds, add>;
4119 defm VABAu : N3VIntOp_QHS<1,0,0b0111,1, IIC_VABAD, IIC_VABAQ,
4120 "vaba", "u", int_arm_neon_vabdu, add>;
4122 // VABAL : Vector Absolute Difference and Accumulate Long (Q += | D - D |)
4123 defm VABALs : N3VLIntExtOp_QHS<0,1,0b0101,0, IIC_VABAD,
4124 "vabal", "s", int_arm_neon_vabds, zext, add>;
4125 defm VABALu : N3VLIntExtOp_QHS<1,1,0b0101,0, IIC_VABAD,
4126 "vabal", "u", int_arm_neon_vabdu, zext, add>;
4128 // Vector Maximum and Minimum.
4130 // VMAX : Vector Maximum
4131 defm VMAXs : N3VInt_QHS<0, 0, 0b0110, 0, N3RegFrm,
4132 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4133 "vmax", "s", int_arm_neon_vmaxs, 1>;
4134 defm VMAXu : N3VInt_QHS<1, 0, 0b0110, 0, N3RegFrm,
4135 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4136 "vmax", "u", int_arm_neon_vmaxu, 1>;
4137 def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBIND,
4139 v2f32, v2f32, int_arm_neon_vmaxs, 1>;
4140 def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBINQ,
4142 v4f32, v4f32, int_arm_neon_vmaxs, 1>;
4144 // VMIN : Vector Minimum
4145 defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, N3RegFrm,
4146 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4147 "vmin", "s", int_arm_neon_vmins, 1>;
4148 defm VMINu : N3VInt_QHS<1, 0, 0b0110, 1, N3RegFrm,
4149 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4150 "vmin", "u", int_arm_neon_vminu, 1>;
4151 def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBIND,
4153 v2f32, v2f32, int_arm_neon_vmins, 1>;
4154 def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBINQ,
4156 v4f32, v4f32, int_arm_neon_vmins, 1>;
4158 // Vector Pairwise Operations.
4160 // VPADD : Vector Pairwise Add
4161 def VPADDi8 : N3VDInt<0, 0, 0b00, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4163 v8i8, v8i8, int_arm_neon_vpadd, 0>;
4164 def VPADDi16 : N3VDInt<0, 0, 0b01, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4166 v4i16, v4i16, int_arm_neon_vpadd, 0>;
4167 def VPADDi32 : N3VDInt<0, 0, 0b10, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4169 v2i32, v2i32, int_arm_neon_vpadd, 0>;
4170 def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, N3RegFrm,
4171 IIC_VPBIND, "vpadd", "f32",
4172 v2f32, v2f32, int_arm_neon_vpadd, 0>;
4174 // VPADDL : Vector Pairwise Add Long
4175 defm VPADDLs : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpaddl", "s",
4176 int_arm_neon_vpaddls>;
4177 defm VPADDLu : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpaddl", "u",
4178 int_arm_neon_vpaddlu>;
4180 // VPADAL : Vector Pairwise Add and Accumulate Long
4181 defm VPADALs : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b01100, 0, "vpadal", "s",
4182 int_arm_neon_vpadals>;
4183 defm VPADALu : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b01101, 0, "vpadal", "u",
4184 int_arm_neon_vpadalu>;
// VPMAX/VPMIN operate only on 64-bit (D) registers; there are no Q forms.
4186 // VPMAX : Vector Pairwise Maximum
4187 def VPMAXs8 : N3VDInt<0, 0, 0b00, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4188 "s8", v8i8, v8i8, int_arm_neon_vpmaxs, 0>;
4189 def VPMAXs16 : N3VDInt<0, 0, 0b01, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4190 "s16", v4i16, v4i16, int_arm_neon_vpmaxs, 0>;
4191 def VPMAXs32 : N3VDInt<0, 0, 0b10, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4192 "s32", v2i32, v2i32, int_arm_neon_vpmaxs, 0>;
4193 def VPMAXu8 : N3VDInt<1, 0, 0b00, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4194 "u8", v8i8, v8i8, int_arm_neon_vpmaxu, 0>;
4195 def VPMAXu16 : N3VDInt<1, 0, 0b01, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4196 "u16", v4i16, v4i16, int_arm_neon_vpmaxu, 0>;
4197 def VPMAXu32 : N3VDInt<1, 0, 0b10, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4198 "u32", v2i32, v2i32, int_arm_neon_vpmaxu, 0>;
4199 def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VPBIND, "vpmax",
4200 "f32", v2f32, v2f32, int_arm_neon_vpmaxs, 0>;
4202 // VPMIN : Vector Pairwise Minimum
4203 def VPMINs8 : N3VDInt<0, 0, 0b00, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4204 "s8", v8i8, v8i8, int_arm_neon_vpmins, 0>;
4205 def VPMINs16 : N3VDInt<0, 0, 0b01, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4206 "s16", v4i16, v4i16, int_arm_neon_vpmins, 0>;
4207 def VPMINs32 : N3VDInt<0, 0, 0b10, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4208 "s32", v2i32, v2i32, int_arm_neon_vpmins, 0>;
4209 def VPMINu8 : N3VDInt<1, 0, 0b00, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4210 "u8", v8i8, v8i8, int_arm_neon_vpminu, 0>;
4211 def VPMINu16 : N3VDInt<1, 0, 0b01, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4212 "u16", v4i16, v4i16, int_arm_neon_vpminu, 0>;
4213 def VPMINu32 : N3VDInt<1, 0, 0b10, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4214 "u32", v2i32, v2i32, int_arm_neon_vpminu, 0>;
4215 def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VPBIND, "vpmin",
4216 "f32", v2f32, v2f32, int_arm_neon_vpmins, 0>;
4218 // Vector Reciprocal and Reciprocal Square Root Estimate and Step.
// The "u32" estimate forms operate on unsigned fixed-point data; the "f32"
// forms on floating point. The *S (step) forms are the Newton-Raphson
// refinement companions to the estimates.
4220 // VRECPE : Vector Reciprocal Estimate
4221 def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
4222 IIC_VUNAD, "vrecpe", "u32",
4223 v2i32, v2i32, int_arm_neon_vrecpe>;
4224 def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
4225 IIC_VUNAQ, "vrecpe", "u32",
4226 v4i32, v4i32, int_arm_neon_vrecpe>;
4227 def VRECPEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
4228 IIC_VUNAD, "vrecpe", "f32",
4229 v2f32, v2f32, int_arm_neon_vrecpe>;
4230 def VRECPEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
4231 IIC_VUNAQ, "vrecpe", "f32",
4232 v4f32, v4f32, int_arm_neon_vrecpe>;
4234 // VRECPS : Vector Reciprocal Step
4235 def VRECPSfd : N3VDInt<0, 0, 0b00, 0b1111, 1, N3RegFrm,
4236 IIC_VRECSD, "vrecps", "f32",
4237 v2f32, v2f32, int_arm_neon_vrecps, 1>;
4238 def VRECPSfq : N3VQInt<0, 0, 0b00, 0b1111, 1, N3RegFrm,
4239 IIC_VRECSQ, "vrecps", "f32",
4240 v4f32, v4f32, int_arm_neon_vrecps, 1>;
4242 // VRSQRTE : Vector Reciprocal Square Root Estimate
4243 def VRSQRTEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
4244 IIC_VUNAD, "vrsqrte", "u32",
4245 v2i32, v2i32, int_arm_neon_vrsqrte>;
4246 def VRSQRTEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
4247 IIC_VUNAQ, "vrsqrte", "u32",
4248 v4i32, v4i32, int_arm_neon_vrsqrte>;
4249 def VRSQRTEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
4250 IIC_VUNAD, "vrsqrte", "f32",
4251 v2f32, v2f32, int_arm_neon_vrsqrte>;
4252 def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
4253 IIC_VUNAQ, "vrsqrte", "f32",
4254 v4f32, v4f32, int_arm_neon_vrsqrte>;
4256 // VRSQRTS : Vector Reciprocal Square Root Step
4257 def VRSQRTSfd : N3VDInt<0, 0, 0b10, 0b1111, 1, N3RegFrm,
4258 IIC_VRECSD, "vrsqrts", "f32",
4259 v2f32, v2f32, int_arm_neon_vrsqrts, 1>;
4260 def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1, N3RegFrm,
4261 IIC_VRECSQ, "vrsqrts", "f32",
4262 v4f32, v4f32, int_arm_neon_vrsqrts, 1>;
// Register-shift forms: the shift amount comes from a register; immediate
// forms (VSHLi/VSHRs/VSHRu) use an encoded immediate instead.
4266 // VSHL : Vector Shift
4267 defm VSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 0, N3RegVShFrm,
4268 IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
4269 "vshl", "s", int_arm_neon_vshifts>;
4270 defm VSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 0, N3RegVShFrm,
4271 IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
4272 "vshl", "u", int_arm_neon_vshiftu>;
4274 // VSHL : Vector Shift Left (Immediate)
4275 defm VSHLi : N2VShL_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl>;
4277 // VSHR : Vector Shift Right (Immediate)
4278 defm VSHRs : N2VShR_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s",NEONvshrs>;
4279 defm VSHRu : N2VShR_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u",NEONvshru>;
4281 // VSHLL : Vector Shift Left Long
4282 defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
4283 defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u", NEONvshllu>;
4285 // VSHLL : Vector Shift Left Long (with maximum shift count)
4286 class N2VLShMax<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
4287 bit op6, bit op4, string OpcodeStr, string Dt, ValueType ResTy,
4288 ValueType OpTy, SDNode OpNode>
4289 : N2VLSh<op24, op23, op11_8, op7, op6, op4, OpcodeStr, Dt,
4290 ResTy, OpTy, OpNode> {
4291 let Inst{21-16} = op21_16;
4292 let DecoderMethod = "DecodeVSHLMaxInstruction";
4294 def VSHLLi8 : N2VLShMax<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll", "i8",
4295 v8i16, v8i8, NEONvshlli>;
4296 def VSHLLi16 : N2VLShMax<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll", "i16",
4297 v4i32, v4i16, NEONvshlli>;
4298 def VSHLLi32 : N2VLShMax<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll", "i32",
4299 v2i64, v2i32, NEONvshlli>;
4301 // VSHRN : Vector Shift Right and Narrow
4302 defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
4305 // VRSHL : Vector Rounding Shift
4306 defm VRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 0, N3RegVShFrm,
4307 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4308 "vrshl", "s", int_arm_neon_vrshifts>;
4309 defm VRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 0, N3RegVShFrm,
4310 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4311 "vrshl", "u", int_arm_neon_vrshiftu>;
4312 // VRSHR : Vector Rounding Shift Right
4313 defm VRSHRs : N2VShR_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s",NEONvrshrs>;
4314 defm VRSHRu : N2VShR_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u",NEONvrshru>;
4316 // VRSHRN : Vector Rounding Shift Right and Narrow
4317 defm VRSHRN : N2VNSh_HSD<0, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vrshrn", "i",
4320 // VQSHL : Vector Saturating Shift
4321 defm VQSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 1, N3RegVShFrm,
4322 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4323 "vqshl", "s", int_arm_neon_vqshifts>;
4324 defm VQSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 1, N3RegVShFrm,
4325 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4326 "vqshl", "u", int_arm_neon_vqshiftu>;
4327 // VQSHL : Vector Saturating Shift Left (Immediate)
4328 defm VQSHLsi : N2VShL_QHSD<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s",NEONvqshls>;
4329 defm VQSHLui : N2VShL_QHSD<1,1,0b0111,1, IIC_VSHLi4D, "vqshl", "u",NEONvqshlu>;
// VQSHLU takes signed input and produces an unsigned saturated result,
// hence the "s" data-type suffix on an unsigned-result operation.
4331 // VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
4332 defm VQSHLsu : N2VShL_QHSD<1,1,0b0110,1, IIC_VSHLi4D,"vqshlu","s",NEONvqshlsu>;
4334 // VQSHRN : Vector Saturating Shift Right and Narrow
4335 defm VQSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "s",
4337 defm VQSHRNu : N2VNSh_HSD<1, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "u",
4340 // VQSHRUN : Vector Saturating Shift Right and Narrow (Unsigned)
4341 defm VQSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 0, 1, IIC_VSHLi4D, "vqshrun", "s",
4344 // VQRSHL : Vector Saturating Rounding Shift
4345 defm VQRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 1, N3RegVShFrm,
4346 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4347 "vqrshl", "s", int_arm_neon_vqrshifts>;
4348 defm VQRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 1, N3RegVShFrm,
4349 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4350 "vqrshl", "u", int_arm_neon_vqrshiftu>;
4352 // VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
4353 defm VQRSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "s",
4355 defm VQRSHRNu : N2VNSh_HSD<1, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "u",
4358 // VQRSHRUN : Vector Saturating Rounding Shift Right and Narrow (Unsigned)
4359 defm VQRSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vqrshrun", "s",
// Shift-and-accumulate / shift-and-insert: these read-modify-write the
// destination register (see the N2VShAdd_/N2VShIns_ multiclasses).
4362 // VSRA : Vector Shift Right and Accumulate
4363 defm VSRAs : N2VShAdd_QHSD<0, 1, 0b0001, 1, "vsra", "s", NEONvshrs>;
4364 defm VSRAu : N2VShAdd_QHSD<1, 1, 0b0001, 1, "vsra", "u", NEONvshru>;
4365 // VRSRA : Vector Rounding Shift Right and Accumulate
4366 defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra", "s", NEONvrshrs>;
4367 defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra", "u", NEONvrshru>;
4369 // VSLI : Vector Shift Left and Insert
4370 defm VSLI : N2VShInsL_QHSD<1, 1, 0b0101, 1, "vsli">;
4372 // VSRI : Vector Shift Right and Insert
4373 defm VSRI : N2VShInsR_QHSD<1, 1, 0b0100, 1, "vsri">;
4375 // Vector Absolute and Saturating Absolute.
4377 // VABS : Vector Absolute Value
4378 defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
4379 IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s",
// Floating-point absolute value (integer VABS forms are defined above).
4381 def VABSfd : N2VDInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
4382 IIC_VUNAD, "vabs", "f32",
4383 v2f32, v2f32, int_arm_neon_vabs>;
4384 def VABSfq : N2VQInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
4385 IIC_VUNAQ, "vabs", "f32",
4386 v4f32, v4f32, int_arm_neon_vabs>;
4388 // VQABS : Vector Saturating Absolute Value
4389 defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
4390 IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
4391 int_arm_neon_vqabs>;
// vnegd/vnegq: pattern fragments matching integer negation expressed as
// (0 - x), for 64-bit and 128-bit vectors respectively.
4395 def vnegd : PatFrag<(ops node:$in),
4396 (sub (bitconvert (v2i32 NEONimmAllZerosV)), node:$in)>;
4397 def vnegq : PatFrag<(ops node:$in),
4398 (sub (bitconvert (v4i32 NEONimmAllZerosV)), node:$in)>;
// Helper classes instantiated once per element size below.
4400 class VNEGD<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
4401 : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$Vd), (ins DPR:$Vm),
4402 IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
4403 [(set DPR:$Vd, (Ty (vnegd DPR:$Vm)))]>;
4404 class VNEGQ<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
4405 : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$Vd), (ins QPR:$Vm),
4406 IIC_VSHLiQ, OpcodeStr, Dt, "$Vd, $Vm", "",
4407 [(set QPR:$Vd, (Ty (vnegq QPR:$Vm)))]>;
4409 // VNEG : Vector Negate (integer)
4410 def VNEGs8d : VNEGD<0b00, "vneg", "s8", v8i8>;
4411 def VNEGs16d : VNEGD<0b01, "vneg", "s16", v4i16>;
4412 def VNEGs32d : VNEGD<0b10, "vneg", "s32", v2i32>;
4413 def VNEGs8q : VNEGQ<0b00, "vneg", "s8", v16i8>;
4414 def VNEGs16q : VNEGQ<0b01, "vneg", "s16", v8i16>;
4415 def VNEGs32q : VNEGQ<0b10, "vneg", "s32", v4i32>;
4417 // VNEG : Vector Negate (floating-point)
4418 def VNEGfd : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
4419 (outs DPR:$Vd), (ins DPR:$Vm), IIC_VUNAD,
4420 "vneg", "f32", "$Vd, $Vm", "",
4421 [(set DPR:$Vd, (v2f32 (fneg DPR:$Vm)))]>;
4422 def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
4423 (outs QPR:$Vd), (ins QPR:$Vm), IIC_VUNAQ,
4424 "vneg", "f32", "$Vd, $Vm", "",
4425 [(set QPR:$Vd, (v4f32 (fneg QPR:$Vm)))]>;
// Extra patterns so (0 - x) is selected for the remaining vector types
// beyond those in the class-level patterns above.
4427 def : Pat<(v8i8 (vnegd DPR:$src)), (VNEGs8d DPR:$src)>;
4428 def : Pat<(v4i16 (vnegd DPR:$src)), (VNEGs16d DPR:$src)>;
4429 def : Pat<(v2i32 (vnegd DPR:$src)), (VNEGs32d DPR:$src)>;
4430 def : Pat<(v16i8 (vnegq QPR:$src)), (VNEGs8q QPR:$src)>;
4431 def : Pat<(v8i16 (vnegq QPR:$src)), (VNEGs16q QPR:$src)>;
4432 def : Pat<(v4i32 (vnegq QPR:$src)), (VNEGs32q QPR:$src)>;
4434 // VQNEG : Vector Saturating Negate
4435 defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0,
4436 IIC_VQUNAiD, IIC_VQUNAiQ, "vqneg", "s",
4437 int_arm_neon_vqneg>;
4439 // Vector Bit Counting Operations.
4441 // VCLS : Vector Count Leading Sign Bits
4442 defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0,
4443 IIC_VCNTiD, IIC_VCNTiQ, "vcls", "s",
4445 // VCLZ : Vector Count Leading Zeros
4446 defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0,
4447 IIC_VCNTiD, IIC_VCNTiQ, "vclz", "i",
4449 // VCNT : Vector Count One Bits
// Popcount per byte; only an 8-bit element form exists.
4450 def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
4451 IIC_VCNTiD, "vcnt", "8",
4452 v8i8, v8i8, int_arm_neon_vcnt>;
4453 def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
4454 IIC_VCNTiQ, "vcnt", "8",
4455 v16i8, v16i8, int_arm_neon_vcnt>;
// VSWP : Vector Swap — assembler-only here (no selection patterns).
4458 def VSWPd : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 0, 0,
4459 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
4460 "vswp", "$Vd, $Vm", "", []>;
4461 def VSWPq : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 1, 0,
4462 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
4463 "vswp", "$Vd, $Vm", "", []>;
4465 // Vector Move Operations.
4467 // VMOV : Vector Move (Register)
// Register-to-register vmov is an alias for VORR with both source
// operands the same register.
4468 def : InstAlias<"vmov${p} $Vd, $Vm",
4469 (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
4470 def : InstAlias<"vmov${p} $Vd, $Vm",
4471 (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
4472 defm : VFPDTAnyNoF64InstAlias<"vmov${p}", "$Vd, $Vm",
4473 (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
4474 defm : VFPDTAnyNoF64InstAlias<"vmov${p}", "$Vd, $Vm",
4475 (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
4477 // VMOV : Vector Move (Immediate)
4479 let isReMaterializable = 1 in {
// Splatted i8 immediate moves; defined inside the surrounding
// `let isReMaterializable = 1` region (opened above).
4480 def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$Vd),
4481 (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
4482 "vmov", "i8", "$Vd, $SIMM", "",
4483 [(set DPR:$Vd, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
4484 def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$Vd),
4485 (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
4486 "vmov", "i8", "$Vd, $SIMM", "",
4487 [(set QPR:$Vd, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
4489 def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$Vd),
4490 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4491 "vmov", "i16", "$Vd, $SIMM", "",
4492 [(set DPR:$Vd, (v4i16 (NEONvmovImm timm:$SIMM)))]> {
4493 let Inst{9} = SIMM{9};
4496 def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$Vd),
4497 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4498 "vmov", "i16", "$Vd, $SIMM", "",
4499 [(set QPR:$Vd, (v8i16 (NEONvmovImm timm:$SIMM)))]> {
4500 let Inst{9} = SIMM{9};
4503 def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$Vd),
4504 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4505 "vmov", "i32", "$Vd, $SIMM", "",
4506 [(set DPR:$Vd, (v2i32 (NEONvmovImm timm:$SIMM)))]> {
4507 let Inst{11-8} = SIMM{11-8};
4510 def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$Vd),
4511 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4512 "vmov", "i32", "$Vd, $SIMM", "",
4513 [(set QPR:$Vd, (v4i32 (NEONvmovImm timm:$SIMM)))]> {
4514 let Inst{11-8} = SIMM{11-8};
// i64 and f32 immediate moves; still inside the `isReMaterializable` region.
4517 def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$Vd),
4518 (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
4519 "vmov", "i64", "$Vd, $SIMM", "",
4520 [(set DPR:$Vd, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
4521 def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$Vd),
4522 (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
4523 "vmov", "i64", "$Vd, $SIMM", "",
4524 [(set QPR:$Vd, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
// f32 forms use the VFP-style FP immediate operand (nImmVMOVF32).
4526 def VMOVv2f32 : N1ModImm<1, 0b000, 0b1111, 0, 0, 0, 1, (outs DPR:$Vd),
4527 (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
4528 "vmov", "f32", "$Vd, $SIMM", "",
4529 [(set DPR:$Vd, (v2f32 (NEONvmovFPImm timm:$SIMM)))]>;
4530 def VMOVv4f32 : N1ModImm<1, 0b000, 0b1111, 0, 1, 0, 1, (outs QPR:$Vd),
4531 (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
4532 "vmov", "f32", "$Vd, $SIMM", "",
4533 [(set QPR:$Vd, (v4f32 (NEONvmovFPImm timm:$SIMM)))]>;
4534 } // isReMaterializable
4536 // VMOV : Vector Get Lane (move scalar to ARM core register)
4538 def VGETLNs8 : NVGetLane<{1,1,1,0,0,1,?,1}, 0b1011, {?,?},
4539 (outs GPR:$R), (ins DPR:$V, VectorIndex8:$lane),
4540 IIC_VMOVSI, "vmov", "s8", "$R, $V$lane",
4541 [(set GPR:$R, (NEONvgetlanes (v8i8 DPR:$V),
4543 let Inst{21} = lane{2};
4544 let Inst{6-5} = lane{1-0};
4546 def VGETLNs16 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, {?,1},
4547 (outs GPR:$R), (ins DPR:$V, VectorIndex16:$lane),
4548 IIC_VMOVSI, "vmov", "s16", "$R, $V$lane",
4549 [(set GPR:$R, (NEONvgetlanes (v4i16 DPR:$V),
4551 let Inst{21} = lane{1};
4552 let Inst{6} = lane{0};
4554 def VGETLNu8 : NVGetLane<{1,1,1,0,1,1,?,1}, 0b1011, {?,?},
4555 (outs GPR:$R), (ins DPR:$V, VectorIndex8:$lane),
4556 IIC_VMOVSI, "vmov", "u8", "$R, $V$lane",
4557 [(set GPR:$R, (NEONvgetlaneu (v8i8 DPR:$V),
4559 let Inst{21} = lane{2};
4560 let Inst{6-5} = lane{1-0};
4562 def VGETLNu16 : NVGetLane<{1,1,1,0,1,0,?,1}, 0b1011, {?,1},
4563 (outs GPR:$R), (ins DPR:$V, VectorIndex16:$lane),
4564 IIC_VMOVSI, "vmov", "u16", "$R, $V$lane",
4565 [(set GPR:$R, (NEONvgetlaneu (v4i16 DPR:$V),
4567 let Inst{21} = lane{1};
4568 let Inst{6} = lane{0};
4570 def VGETLNi32 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, 0b00,
4571 (outs GPR:$R), (ins DPR:$V, VectorIndex32:$lane),
4572 IIC_VMOVSI, "vmov", "32", "$R, $V$lane",
4573 [(set GPR:$R, (extractelt (v2i32 DPR:$V),
4575 let Inst{21} = lane{0};
4577 // def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
// Q-register lane extracts: narrow to the containing D subregister and
// reuse the D-register VGETLN instructions; DSubReg_*/SubReg_* map the
// Q-lane index to (subreg, lane-within-D).
4578 def : Pat<(NEONvgetlanes (v16i8 QPR:$src), imm:$lane),
4579 (VGETLNs8 (v8i8 (EXTRACT_SUBREG QPR:$src,
4580 (DSubReg_i8_reg imm:$lane))),
4581 (SubReg_i8_lane imm:$lane))>;
4582 def : Pat<(NEONvgetlanes (v8i16 QPR:$src), imm:$lane),
4583 (VGETLNs16 (v4i16 (EXTRACT_SUBREG QPR:$src,
4584 (DSubReg_i16_reg imm:$lane))),
4585 (SubReg_i16_lane imm:$lane))>;
4586 def : Pat<(NEONvgetlaneu (v16i8 QPR:$src), imm:$lane),
4587 (VGETLNu8 (v8i8 (EXTRACT_SUBREG QPR:$src,
4588 (DSubReg_i8_reg imm:$lane))),
4589 (SubReg_i8_lane imm:$lane))>;
4590 def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane),
4591 (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
4592 (DSubReg_i16_reg imm:$lane))),
4593 (SubReg_i16_lane imm:$lane))>;
4594 def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
4595 (VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src,
4596 (DSubReg_i32_reg imm:$lane))),
4597 (SubReg_i32_lane imm:$lane))>;
// f32 lane extracts become plain subregister reads of the overlapping
// S register (no instruction needed); COPY_TO_REGCLASS constrains the
// register class so the S subregister exists.
4598 def : Pat<(extractelt (v2f32 DPR:$src1), imm:$src2),
4599 (EXTRACT_SUBREG (v2f32 (COPY_TO_REGCLASS (v2f32 DPR:$src1),DPR_VFP2)),
4600 (SSubReg_f32_reg imm:$src2))>;
4601 def : Pat<(extractelt (v4f32 QPR:$src1), imm:$src2),
4602 (EXTRACT_SUBREG (v4f32 (COPY_TO_REGCLASS (v4f32 QPR:$src1),QPR_VFP2)),
4603 (SSubReg_f32_reg imm:$src2))>;
4604 //def : Pat<(extractelt (v2i64 QPR:$src1), imm:$src2),
4605 // (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
4606 def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
4607 (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
4610 // VMOV : Vector Set Lane (move ARM core register to scalar)
4612 let Constraints = "$src1 = $V" in {
4613 def VSETLNi8 : NVSetLane<{1,1,1,0,0,1,?,0}, 0b1011, {?,?}, (outs DPR:$V),
4614 (ins DPR:$src1, GPR:$R, VectorIndex8:$lane),
4615 IIC_VMOVISL, "vmov", "8", "$V$lane, $R",
4616 [(set DPR:$V, (vector_insert (v8i8 DPR:$src1),
4617 GPR:$R, imm:$lane))]> {
4618 let Inst{21} = lane{2};
4619 let Inst{6-5} = lane{1-0};
4621 def VSETLNi16 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, {?,1}, (outs DPR:$V),
4622 (ins DPR:$src1, GPR:$R, VectorIndex16:$lane),
4623 IIC_VMOVISL, "vmov", "16", "$V$lane, $R",
4624 [(set DPR:$V, (vector_insert (v4i16 DPR:$src1),
4625 GPR:$R, imm:$lane))]> {
4626 let Inst{21} = lane{1};
4627 let Inst{6} = lane{0};
4629 def VSETLNi32 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, 0b00, (outs DPR:$V),
4630 (ins DPR:$src1, GPR:$R, VectorIndex32:$lane),
4631 IIC_VMOVISL, "vmov", "32", "$V$lane, $R",
4632 [(set DPR:$V, (insertelt (v2i32 DPR:$src1),
4633 GPR:$R, imm:$lane))]> {
4634 let Inst{21} = lane{0};
// Q-register lane inserts: extract the containing D subregister, insert
// into it with the D-register VSETLN instruction, then put it back with
// INSERT_SUBREG.
4637 def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
4638 (v16i8 (INSERT_SUBREG QPR:$src1,
4639 (v8i8 (VSETLNi8 (v8i8 (EXTRACT_SUBREG QPR:$src1,
4640 (DSubReg_i8_reg imm:$lane))),
4641 GPR:$src2, (SubReg_i8_lane imm:$lane))),
4642 (DSubReg_i8_reg imm:$lane)))>;
4643 def : Pat<(vector_insert (v8i16 QPR:$src1), GPR:$src2, imm:$lane),
4644 (v8i16 (INSERT_SUBREG QPR:$src1,
4645 (v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
4646 (DSubReg_i16_reg imm:$lane))),
4647 GPR:$src2, (SubReg_i16_lane imm:$lane))),
4648 (DSubReg_i16_reg imm:$lane)))>;
4649 def : Pat<(insertelt (v4i32 QPR:$src1), GPR:$src2, imm:$lane),
4650 (v4i32 (INSERT_SUBREG QPR:$src1,
4651 (v2i32 (VSETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src1,
4652 (DSubReg_i32_reg imm:$lane))),
4653 GPR:$src2, (SubReg_i32_lane imm:$lane))),
4654 (DSubReg_i32_reg imm:$lane)))>;
// f32 lane inserts write the overlapping S subregister directly.
4656 def : Pat<(v2f32 (insertelt DPR:$src1, SPR:$src2, imm:$src3)),
4657 (INSERT_SUBREG (v2f32 (COPY_TO_REGCLASS DPR:$src1, DPR_VFP2)),
4658 SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
4659 def : Pat<(v4f32 (insertelt QPR:$src1, SPR:$src2, imm:$src3)),
4660 (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2)),
4661 SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
4663 //def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
4664 // (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
4665 def : Pat<(v2f64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
4666 (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
// scalar_to_vector: place the scalar in element 0 of an undefined vector.
// FP scalars already live in S/D registers, so a subregister insert into
// IMPLICIT_DEF suffices; integer scalars come from GPRs and need VSETLN.
4668 def : Pat<(v2f32 (scalar_to_vector SPR:$src)),
4669 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$src, ssub_0)>;
4670 def : Pat<(v2f64 (scalar_to_vector (f64 DPR:$src))),
4671 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), DPR:$src, dsub_0)>;
4672 def : Pat<(v4f32 (scalar_to_vector SPR:$src)),
4673 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), SPR:$src, ssub_0)>;
4675 def : Pat<(v8i8 (scalar_to_vector GPR:$src)),
4676 (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4677 def : Pat<(v4i16 (scalar_to_vector GPR:$src)),
4678 (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4679 def : Pat<(v2i32 (scalar_to_vector GPR:$src)),
4680 (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4682 def : Pat<(v16i8 (scalar_to_vector GPR:$src)),
4683 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4684 (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4686 def : Pat<(v8i16 (scalar_to_vector GPR:$src)),
4687 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4688 (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4690 def : Pat<(v4i32 (scalar_to_vector GPR:$src)),
4691 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4692 (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4695 // VDUP : Vector Duplicate (from ARM core register to all elements)
// Helper classes for the D- and Q-register forms; opcod1/opcod3 carry the
// size-dependent encoding bits chosen per instantiation below.
4697 class VDUPD<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
4698 : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$V), (ins GPR:$R),
4699 IIC_VMOVIS, "vdup", Dt, "$V, $R",
4700 [(set DPR:$V, (Ty (NEONvdup (i32 GPR:$R))))]>;
4701 class VDUPQ<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
4702 : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$V), (ins GPR:$R),
4703 IIC_VMOVIS, "vdup", Dt, "$V, $R",
4704 [(set QPR:$V, (Ty (NEONvdup (i32 GPR:$R))))]>;
4706 def VDUP8d : VDUPD<0b11101100, 0b00, "8", v8i8>;
4707 def VDUP16d : VDUPD<0b11101000, 0b01, "16", v4i16>;
4708 def VDUP32d : VDUPD<0b11101000, 0b00, "32", v2i32>;
4709 def VDUP8q : VDUPQ<0b11101110, 0b00, "8", v16i8>;
4710 def VDUP16q : VDUPQ<0b11101010, 0b01, "16", v8i16>;
4711 def VDUP32q : VDUPQ<0b11101010, 0b00, "32", v4i32>;
// f32 dup from a GPR bit pattern reuses the 32-bit integer dup.
4713 def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32d GPR:$R)>;
4714 def : Pat<(v4f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32q GPR:$R)>;
4716 // VDUP : Vector Duplicate Lane (from scalar to all elements)
// D-register form: replicate one lane of the source D register across all
// elements of the destination D register.
4718 class VDUPLND<bits<4> op19_16, string OpcodeStr, string Dt,
4719 ValueType Ty, Operand IdxTy>
4720 : NVDupLane<op19_16, 0, (outs DPR:$Vd), (ins DPR:$Vm, IdxTy:$lane),
4721 IIC_VMOVD, OpcodeStr, Dt, "$Vd, $Vm$lane",
4722 [(set DPR:$Vd, (Ty (NEONvduplane (Ty DPR:$Vm), imm:$lane)))]>;
// Q-register form: source lane comes from a D register, result fills a Q
// register (ResTy) from the narrower OpTy source.
4724 class VDUPLNQ<bits<4> op19_16, string OpcodeStr, string Dt,
4725 ValueType ResTy, ValueType OpTy, Operand IdxTy>
4726 : NVDupLane<op19_16, 1, (outs QPR:$Vd), (ins DPR:$Vm, IdxTy:$lane),
4727 IIC_VMOVQ, OpcodeStr, Dt, "$Vd, $Vm$lane",
4728 [(set QPR:$Vd, (ResTy (NEONvduplane (OpTy DPR:$Vm),
4729 VectorIndex32:$lane)))]>;
4731 // Inst{19-16} is partially specified depending on the element size.
// Each def below wires the lane index bits into the unspecified ('?')
// portion of Inst{19-16}; wider elements use fewer lane bits.
// NOTE(review): the closing '}' of each def body appears dropped by the
// lossy extraction (original line numbers skip) — confirm upstream.
4733 def VDUPLN8d : VDUPLND<{?,?,?,1}, "vdup", "8", v8i8, VectorIndex8> {
4735 let Inst{19-17} = lane{2-0};
4737 def VDUPLN16d : VDUPLND<{?,?,1,0}, "vdup", "16", v4i16, VectorIndex16> {
4739 let Inst{19-18} = lane{1-0};
4741 def VDUPLN32d : VDUPLND<{?,1,0,0}, "vdup", "32", v2i32, VectorIndex32> {
4743 let Inst{19} = lane{0};
4745 def VDUPLN8q : VDUPLNQ<{?,?,?,1}, "vdup", "8", v16i8, v8i8, VectorIndex8> {
4747 let Inst{19-17} = lane{2-0};
4749 def VDUPLN16q : VDUPLNQ<{?,?,1,0}, "vdup", "16", v8i16, v4i16, VectorIndex16> {
4751 let Inst{19-18} = lane{1-0};
4753 def VDUPLN32q : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4i32, v2i32, VectorIndex32> {
4755 let Inst{19} = lane{0};
// Float lane-splats map onto the 32-bit integer lane-dup instructions.
4758 def : Pat<(v2f32 (NEONvduplane (v2f32 DPR:$Vm), imm:$lane)),
4759 (VDUPLN32d DPR:$Vm, imm:$lane)>;
4761 def : Pat<(v4f32 (NEONvduplane (v2f32 DPR:$Vm), imm:$lane)),
4762 (VDUPLN32q DPR:$Vm, imm:$lane)>;
// Lane-splat from a Q-register source: first extract the D half holding the
// lane (DSubReg_*_reg), then dup from the lane index within that half
// (SubReg_*_lane).
4764 def : Pat<(v16i8 (NEONvduplane (v16i8 QPR:$src), imm:$lane)),
4765 (v16i8 (VDUPLN8q (v8i8 (EXTRACT_SUBREG QPR:$src,
4766 (DSubReg_i8_reg imm:$lane))),
4767 (SubReg_i8_lane imm:$lane)))>;
4768 def : Pat<(v8i16 (NEONvduplane (v8i16 QPR:$src), imm:$lane)),
4769 (v8i16 (VDUPLN16q (v4i16 (EXTRACT_SUBREG QPR:$src,
4770 (DSubReg_i16_reg imm:$lane))),
4771 (SubReg_i16_lane imm:$lane)))>;
4772 def : Pat<(v4i32 (NEONvduplane (v4i32 QPR:$src), imm:$lane)),
4773 (v4i32 (VDUPLN32q (v2i32 (EXTRACT_SUBREG QPR:$src,
4774 (DSubReg_i32_reg imm:$lane))),
4775 (SubReg_i32_lane imm:$lane)))>;
4776 def : Pat<(v4f32 (NEONvduplane (v4f32 QPR:$src), imm:$lane)),
4777 (v4f32 (VDUPLN32q (v2f32 (EXTRACT_SUBREG QPR:$src,
4778 (DSubReg_i32_reg imm:$lane))),
4779 (SubReg_i32_lane imm:$lane)))>;
// Pseudos for splatting an f32 held in an S register; expanded later.
4781 def VDUPfdf : PseudoNeonI<(outs DPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
4782 [(set DPR:$dst, (v2f32 (NEONvdup (f32 SPR:$src))))]>;
4783 def VDUPfqf : PseudoNeonI<(outs QPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
4784 [(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
4786 // VMOVN : Vector Narrowing Move
// Plain narrowing move: selected for a vector truncate.
4787 defm VMOVN : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVN,
4788 "vmovn", "i", trunc>;
4789 // VQMOVN : Vector Saturating Narrowing Move
// Saturating variants, lowered from the corresponding ARM NEON intrinsics:
// signed, unsigned, and signed-to-unsigned (vqmovun).
4790 defm VQMOVNs : N2VNInt_HSD<0b11,0b11,0b10,0b00101,0,0, IIC_VQUNAiD,
4791 "vqmovn", "s", int_arm_neon_vqmovns>;
4792 defm VQMOVNu : N2VNInt_HSD<0b11,0b11,0b10,0b00101,1,0, IIC_VQUNAiD,
4793 "vqmovn", "u", int_arm_neon_vqmovnu>;
4794 defm VQMOVNsu : N2VNInt_HSD<0b11,0b11,0b10,0b00100,1,0, IIC_VQUNAiD,
4795 "vqmovun", "s", int_arm_neon_vqmovnsu>;
4796 // VMOVL : Vector Lengthening Move
// Widening moves map directly onto vector sign/zero extension.
4797 defm VMOVLs : N2VL_QHS<0b01,0b10100,0,1, "vmovl", "s", sext>;
4798 defm VMOVLu : N2VL_QHS<0b11,0b10100,0,1, "vmovl", "u", zext>;
4800 // Vector Conversions.
4802 // VCVT : Vector Convert Between Floating-Point and Integers
// D-register (64-bit) conversions, lowered from the generic fp<->int nodes.
4803 def VCVTf2sd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
4804 v2i32, v2f32, fp_to_sint>;
4805 def VCVTf2ud : N2VD<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
4806 v2i32, v2f32, fp_to_uint>;
4807 def VCVTs2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
4808 v2f32, v2i32, sint_to_fp>;
4809 def VCVTu2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
4810 v2f32, v2i32, uint_to_fp>;
// Q-register (128-bit) variants of the same four conversions.
4812 def VCVTf2sq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
4813 v4i32, v4f32, fp_to_sint>;
4814 def VCVTf2uq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
4815 v4i32, v4f32, fp_to_uint>;
4816 def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
4817 v4f32, v4i32, sint_to_fp>;
4818 def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
4819 v4f32, v4i32, uint_to_fp>;
4821 // VCVT : Vector Convert Between Floating-Point and Fixed-Point.
// Fixed-point conversions are only reachable via the ARM NEON intrinsics.
// NOTE(review): the closing '}' of this 'let' block (and the one below)
// appears dropped by the lossy extraction — confirm against upstream.
4822 let DecoderMethod = "DecodeVCVTD" in {
4823 def VCVTf2xsd : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
4824 v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
4825 def VCVTf2xud : N2VCvtD<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
4826 v2i32, v2f32, int_arm_neon_vcvtfp2fxu>;
4827 def VCVTxs2fd : N2VCvtD<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
4828 v2f32, v2i32, int_arm_neon_vcvtfxs2fp>;
4829 def VCVTxu2fd : N2VCvtD<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
4830 v2f32, v2i32, int_arm_neon_vcvtfxu2fp>;
4833 let DecoderMethod = "DecodeVCVTQ" in {
4834 def VCVTf2xsq : N2VCvtQ<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
4835 v4i32, v4f32, int_arm_neon_vcvtfp2fxs>;
4836 def VCVTf2xuq : N2VCvtQ<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
4837 v4i32, v4f32, int_arm_neon_vcvtfp2fxu>;
4838 def VCVTxs2fq : N2VCvtQ<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
4839 v4f32, v4i32, int_arm_neon_vcvtfxs2fp>;
4840 def VCVTxu2fq : N2VCvtQ<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
4841 v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
4844 // VCVT : Vector Convert Between Half-Precision and Single-Precision.
// Half<->single conversions require the FP16 extension in addition to NEON.
4845 def VCVTf2h : N2VNInt<0b11, 0b11, 0b01, 0b10, 0b01100, 0, 0,
4846 IIC_VUNAQ, "vcvt", "f16.f32",
4847 v4i16, v4f32, int_arm_neon_vcvtfp2hf>,
4848 Requires<[HasNEON, HasFP16]>;
4849 def VCVTh2f : N2VLInt<0b11, 0b11, 0b01, 0b10, 0b01110, 0, 0,
4850 IIC_VUNAQ, "vcvt", "f32.f16",
4851 v4f32, v4i16, int_arm_neon_vcvthf2fp>,
4852 Requires<[HasNEON, HasFP16]>;
4856 // VREV64 : Vector Reverse elements within 64-bit doublewords
// The three VREV families share the same shape; only the op11_7 field and
// the NEONvrev* selection node differ.
4858 class VREV64D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4859 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 0, 0, (outs DPR:$Vd),
4860 (ins DPR:$Vm), IIC_VMOVD,
4861 OpcodeStr, Dt, "$Vd, $Vm", "",
4862 [(set DPR:$Vd, (Ty (NEONvrev64 (Ty DPR:$Vm))))]>;
4863 class VREV64Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4864 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 1, 0, (outs QPR:$Vd),
4865 (ins QPR:$Vm), IIC_VMOVQ,
4866 OpcodeStr, Dt, "$Vd, $Vm", "",
4867 [(set QPR:$Vd, (Ty (NEONvrev64 (Ty QPR:$Vm))))]>;
4869 def VREV64d8 : VREV64D<0b00, "vrev64", "8", v8i8>;
4870 def VREV64d16 : VREV64D<0b01, "vrev64", "16", v4i16>;
4871 def VREV64d32 : VREV64D<0b10, "vrev64", "32", v2i32>;
// f32 reversal reuses the 32-bit integer instruction.
4872 def : Pat<(v2f32 (NEONvrev64 (v2f32 DPR:$Vm))), (VREV64d32 DPR:$Vm)>;
4874 def VREV64q8 : VREV64Q<0b00, "vrev64", "8", v16i8>;
4875 def VREV64q16 : VREV64Q<0b01, "vrev64", "16", v8i16>;
4876 def VREV64q32 : VREV64Q<0b10, "vrev64", "32", v4i32>;
4877 def : Pat<(v4f32 (NEONvrev64 (v4f32 QPR:$Vm))), (VREV64q32 QPR:$Vm)>;
4879 // VREV32 : Vector Reverse elements within 32-bit words
4881 class VREV32D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4882 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 0, 0, (outs DPR:$Vd),
4883 (ins DPR:$Vm), IIC_VMOVD,
4884 OpcodeStr, Dt, "$Vd, $Vm", "",
4885 [(set DPR:$Vd, (Ty (NEONvrev32 (Ty DPR:$Vm))))]>;
4886 class VREV32Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4887 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 1, 0, (outs QPR:$Vd),
4888 (ins QPR:$Vm), IIC_VMOVQ,
4889 OpcodeStr, Dt, "$Vd, $Vm", "",
4890 [(set QPR:$Vd, (Ty (NEONvrev32 (Ty QPR:$Vm))))]>;
// Only 8- and 16-bit elements are meaningful within a 32-bit word.
4892 def VREV32d8 : VREV32D<0b00, "vrev32", "8", v8i8>;
4893 def VREV32d16 : VREV32D<0b01, "vrev32", "16", v4i16>;
4895 def VREV32q8 : VREV32Q<0b00, "vrev32", "8", v16i8>;
4896 def VREV32q16 : VREV32Q<0b01, "vrev32", "16", v8i16>;
4898 // VREV16 : Vector Reverse elements within 16-bit halfwords
4900 class VREV16D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4901 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 0, 0, (outs DPR:$Vd),
4902 (ins DPR:$Vm), IIC_VMOVD,
4903 OpcodeStr, Dt, "$Vd, $Vm", "",
4904 [(set DPR:$Vd, (Ty (NEONvrev16 (Ty DPR:$Vm))))]>;
4905 class VREV16Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4906 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 1, 0, (outs QPR:$Vd),
4907 (ins QPR:$Vm), IIC_VMOVQ,
4908 OpcodeStr, Dt, "$Vd, $Vm", "",
4909 [(set QPR:$Vd, (Ty (NEONvrev16 (Ty QPR:$Vm))))]>;
// Only 8-bit elements fit within a 16-bit halfword.
4911 def VREV16d8 : VREV16D<0b00, "vrev16", "8", v8i8>;
4912 def VREV16q8 : VREV16Q<0b00, "vrev16", "8", v16i8>;
4914 // Other Vector Shuffles.
4916 // Aligned extractions: really just dropping registers
// Extracting a D-aligned subvector from a Q register needs no instruction:
// LaneCVT maps the start lane to the dsub_0/dsub_1 subregister index and
// EXTRACT_SUBREG selects that half.
4918 class AlignedVEXTq<ValueType DestTy, ValueType SrcTy, SDNodeXForm LaneCVT>
4919 : Pat<(DestTy (vector_extract_subvec (SrcTy QPR:$src), (i32 imm:$start))),
4920 (EXTRACT_SUBREG (SrcTy QPR:$src), (LaneCVT imm:$start))>;
// One instantiation per element type.
4922 def : AlignedVEXTq<v8i8, v16i8, DSubReg_i8_reg>;
4924 def : AlignedVEXTq<v4i16, v8i16, DSubReg_i16_reg>;
4926 def : AlignedVEXTq<v2i32, v4i32, DSubReg_i32_reg>;
4928 def : AlignedVEXTq<v1i64, v2i64, DSubReg_f64_reg>;
4930 def : AlignedVEXTq<v2f32, v4f32, DSubReg_i32_reg>;
4933 // VEXT : Vector Extract
// Extract a vector from a pair of source vectors, starting at a byte
// offset derived from $index. Inst{11-8} carries the (byte) index.
// NOTE(review): the 'bits<4> index;' declarations and closing braces of
// several defs below appear dropped by the lossy extraction — confirm
// against the upstream file.
4935 class VEXTd<string OpcodeStr, string Dt, ValueType Ty>
4936 : N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$Vd),
4937 (ins DPR:$Vn, DPR:$Vm, i32imm:$index), NVExtFrm,
4938 IIC_VEXTD, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
4939 [(set DPR:$Vd, (Ty (NEONvext (Ty DPR:$Vn),
4940 (Ty DPR:$Vm), imm:$index)))]> {
4942 let Inst{11-8} = index{3-0};
4945 class VEXTq<string OpcodeStr, string Dt, ValueType Ty>
4946 : N3V<0,1,0b11,{?,?,?,?},1,0, (outs QPR:$Vd),
4947 (ins QPR:$Vn, QPR:$Vm, i32imm:$index), NVExtFrm,
4948 IIC_VEXTQ, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
4949 [(set QPR:$Vd, (Ty (NEONvext (Ty QPR:$Vn),
4950 (Ty QPR:$Vm), imm:$index)))]> {
4952 let Inst{11-8} = index{3-0};
// Wider elements scale the byte index: 16-bit uses Inst{11-9}, 32-bit uses
// Inst{11-10} with the low bits forced to zero.
4955 def VEXTd8 : VEXTd<"vext", "8", v8i8> {
4956 let Inst{11-8} = index{3-0};
4958 def VEXTd16 : VEXTd<"vext", "16", v4i16> {
4959 let Inst{11-9} = index{2-0};
4962 def VEXTd32 : VEXTd<"vext", "32", v2i32> {
4963 let Inst{11-10} = index{1-0};
4964 let Inst{9-8} = 0b00;
// f32 VEXT reuses the 32-bit integer instruction.
4966 def : Pat<(v2f32 (NEONvext (v2f32 DPR:$Vn),
4969 (VEXTd32 DPR:$Vn, DPR:$Vm, imm:$index)>;
4971 def VEXTq8 : VEXTq<"vext", "8", v16i8> {
4972 let Inst{11-8} = index{3-0};
4974 def VEXTq16 : VEXTq<"vext", "16", v8i16> {
4975 let Inst{11-9} = index{2-0};
4978 def VEXTq32 : VEXTq<"vext", "32", v4i32> {
4979 let Inst{11-10} = index{1-0};
4980 let Inst{9-8} = 0b00;
4982 def : Pat<(v4f32 (NEONvext (v4f32 QPR:$Vn),
4985 (VEXTq32 QPR:$Vn, QPR:$Vm, imm:$index)>;
4987 // VTRN : Vector Transpose
// The two-register shuffles (VTRN/VUZP/VZIP) share the N2VDShuffle /
// N2VQShuffle classes; only the op7_4 field and mnemonic differ.
4989 def VTRNd8 : N2VDShuffle<0b00, 0b00001, "vtrn", "8">;
4990 def VTRNd16 : N2VDShuffle<0b01, 0b00001, "vtrn", "16">;
4991 def VTRNd32 : N2VDShuffle<0b10, 0b00001, "vtrn", "32">;
4993 def VTRNq8 : N2VQShuffle<0b00, 0b00001, IIC_VPERMQ, "vtrn", "8">;
4994 def VTRNq16 : N2VQShuffle<0b01, 0b00001, IIC_VPERMQ, "vtrn", "16">;
4995 def VTRNq32 : N2VQShuffle<0b10, 0b00001, IIC_VPERMQ, "vtrn", "32">;
4997 // VUZP : Vector Unzip (Deinterleave)
4999 def VUZPd8 : N2VDShuffle<0b00, 0b00010, "vuzp", "8">;
5000 def VUZPd16 : N2VDShuffle<0b01, 0b00010, "vuzp", "16">;
5001 def VUZPd32 : N2VDShuffle<0b10, 0b00010, "vuzp", "32">;
5003 def VUZPq8 : N2VQShuffle<0b00, 0b00010, IIC_VPERMQ3, "vuzp", "8">;
5004 def VUZPq16 : N2VQShuffle<0b01, 0b00010, IIC_VPERMQ3, "vuzp", "16">;
5005 def VUZPq32 : N2VQShuffle<0b10, 0b00010, IIC_VPERMQ3, "vuzp", "32">;
5007 // VZIP : Vector Zip (Interleave)
5009 def VZIPd8 : N2VDShuffle<0b00, 0b00011, "vzip", "8">;
5010 def VZIPd16 : N2VDShuffle<0b01, 0b00011, "vzip", "16">;
5011 def VZIPd32 : N2VDShuffle<0b10, 0b00011, "vzip", "32">;
5013 def VZIPq8 : N2VQShuffle<0b00, 0b00011, IIC_VPERMQ3, "vzip", "8">;
5014 def VZIPq16 : N2VQShuffle<0b01, 0b00011, IIC_VPERMQ3, "vzip", "16">;
5015 def VZIPq32 : N2VQShuffle<0b10, 0b00011, IIC_VPERMQ3, "vzip", "32">;
5017 // Vector Table Lookup and Table Extension.
5019 // VTBL : Vector Table Lookup
// NOTE(review): the 'def VTBLn'/'def VTBXn' header lines for most bodies in
// this region appear dropped by the lossy extraction (original line numbers
// skip) — confirm against the upstream file.
5020 let DecoderMethod = "DecodeTBLInstruction" in {
// One-register table lookup; directly selectable from the vtbl1 intrinsic.
5022 : N3V<1,1,0b11,0b1000,0,0, (outs DPR:$Vd),
5023 (ins VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB1,
5024 "vtbl", "8", "$Vd, $Vn, $Vm", "",
5025 [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbl1 VecListOneD:$Vn, DPR:$Vm)))]>;
// Multi-register tables need consecutive registers, hence the
// extra-source-register allocation constraint.
5026 let hasExtraSrcRegAllocReq = 1 in {
5028 : N3V<1,1,0b11,0b1001,0,0, (outs DPR:$Vd),
5029 (ins DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTB2,
5030 "vtbl", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "", []>;
5032 : N3V<1,1,0b11,0b1010,0,0, (outs DPR:$Vd),
5033 (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm), NVTBLFrm, IIC_VTB3,
5034 "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm", "", []>;
5036 : N3V<1,1,0b11,0b1011,0,0, (outs DPR:$Vd),
5037 (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm),
5039 "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm", "", []>;
5040 } // hasExtraSrcRegAllocReq = 1
// Pseudos taking the table as a single QPR/QQPR super-register operand.
5043 : PseudoNeonI<(outs DPR:$dst), (ins QPR:$tbl, DPR:$src), IIC_VTB2, "", []>;
5045 : PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB3, "", []>;
5047 : PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB4, "", []>;
5049 // VTBX : Vector Table Extension
// Like VTBL but out-of-range indices keep the original destination value,
// expressed via the "$orig = $Vd" tied-operand constraint.
5051 : N3V<1,1,0b11,0b1000,1,0, (outs DPR:$Vd),
5052 (ins DPR:$orig, VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTBX1,
5053 "vtbx", "8", "$Vd, $Vn, $Vm", "$orig = $Vd",
5054 [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbx1
5055 DPR:$orig, VecListOneD:$Vn, DPR:$Vm)))]>;
5056 let hasExtraSrcRegAllocReq = 1 in {
5058 : N3V<1,1,0b11,0b1001,1,0, (outs DPR:$Vd),
5059 (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTBX2,
5060 "vtbx", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "$orig = $Vd", []>;
5062 : N3V<1,1,0b11,0b1010,1,0, (outs DPR:$Vd),
5063 (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm),
5064 NVTBLFrm, IIC_VTBX3,
5065 "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm",
5068 : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$Vd), (ins DPR:$orig, DPR:$Vn,
5069 DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm), NVTBLFrm, IIC_VTBX4,
5070 "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm",
5072 } // hasExtraSrcRegAllocReq = 1
// Pseudo forms of VTBX, mirroring the VTBL pseudos above.
5075 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QPR:$tbl, DPR:$src),
5076 IIC_VTBX2, "$orig = $dst", []>;
5078 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
5079 IIC_VTBX3, "$orig = $dst", []>;
5081 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
5082 IIC_VTBX4, "$orig = $dst", []>;
5083 } // DecoderMethod = "DecodeTBLInstruction"
5085 //===----------------------------------------------------------------------===//
5086 // NEON instructions for single-precision FP math
5087 //===----------------------------------------------------------------------===//
// These patterns implement scalar f32 ops on NEON: the scalar is inserted
// into lane 0 of an undefined v2f32, the D-register NEON op runs, and the
// result is extracted back out of lane 0 (ssub_0). COPY_TO_REGCLASS keeps
// the values in the VFP2-compatible register subclass.
// NOTE(review): some intermediate lines of these classes appear dropped by
// the lossy extraction (original line numbers skip) — confirm upstream.
5089 class N2VSPat<SDNode OpNode, NeonI Inst>
5090 : NEONFPPat<(f32 (OpNode SPR:$a)),
5092 (v2f32 (COPY_TO_REGCLASS (Inst
5094 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5095 SPR:$a, ssub_0)), DPR_VFP2)), ssub_0)>;
5097 class N3VSPat<SDNode OpNode, NeonI Inst>
5098 : NEONFPPat<(f32 (OpNode SPR:$a, SPR:$b)),
5100 (v2f32 (COPY_TO_REGCLASS (Inst
5102 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5105 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5106 SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
// Fused multiply-accumulate form: acc, a and b all travel through lane 0.
5108 class N3VSMulOpPat<SDNode MulNode, SDNode OpNode, NeonI Inst>
5109 : NEONFPPat<(f32 (OpNode SPR:$acc, (f32 (MulNode SPR:$a, SPR:$b)))),
5111 (v2f32 (COPY_TO_REGCLASS (Inst
5113 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5116 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5119 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5120 SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
// Scalar f32 arithmetic mapped onto the D-register NEON instructions.
5122 def : N3VSPat<fadd, VADDfd>;
5123 def : N3VSPat<fsub, VSUBfd>;
5124 def : N3VSPat<fmul, VMULfd>;
// VMLA/VMLS are only used when the subtarget prefers NEON for FP and
// multiply-accumulate is enabled.
5125 def : N3VSMulOpPat<fmul, fadd, VMLAfd>,
5126 Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
5127 def : N3VSMulOpPat<fmul, fsub, VMLSfd>,
5128 Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
5129 def : N2VSPat<fabs, VABSfd>;
5130 def : N2VSPat<fneg, VNEGfd>;
5131 def : N3VSPat<NEONfmax, VMAXfd>;
5132 def : N3VSPat<NEONfmin, VMINfd>;
5133 def : N2VSPat<arm_ftosi, VCVTf2sd>;
5134 def : N2VSPat<arm_ftoui, VCVTf2ud>;
5135 def : N2VSPat<arm_sitof, VCVTs2fd>;
5136 def : N2VSPat<arm_uitof, VCVTu2fd>;
5138 //===----------------------------------------------------------------------===//
5139 // Non-Instruction Patterns
5140 //===----------------------------------------------------------------------===//
// bitconvert between any two 64-bit vector types is a no-op: both live in
// the same D register, so the pattern just retypes the register.
5143 def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
5144 def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
5145 def : Pat<(v1i64 (bitconvert (v8i8 DPR:$src))), (v1i64 DPR:$src)>;
5146 def : Pat<(v1i64 (bitconvert (f64 DPR:$src))), (v1i64 DPR:$src)>;
5147 def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
5148 def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
5149 def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
5150 def : Pat<(v2i32 (bitconvert (v8i8 DPR:$src))), (v2i32 DPR:$src)>;
5151 def : Pat<(v2i32 (bitconvert (f64 DPR:$src))), (v2i32 DPR:$src)>;
5152 def : Pat<(v2i32 (bitconvert (v2f32 DPR:$src))), (v2i32 DPR:$src)>;
5153 def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
5154 def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
5155 def : Pat<(v4i16 (bitconvert (v8i8 DPR:$src))), (v4i16 DPR:$src)>;
5156 def : Pat<(v4i16 (bitconvert (f64 DPR:$src))), (v4i16 DPR:$src)>;
5157 def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
5158 def : Pat<(v8i8 (bitconvert (v1i64 DPR:$src))), (v8i8 DPR:$src)>;
5159 def : Pat<(v8i8 (bitconvert (v2i32 DPR:$src))), (v8i8 DPR:$src)>;
5160 def : Pat<(v8i8 (bitconvert (v4i16 DPR:$src))), (v8i8 DPR:$src)>;
5161 def : Pat<(v8i8 (bitconvert (f64 DPR:$src))), (v8i8 DPR:$src)>;
5162 def : Pat<(v8i8 (bitconvert (v2f32 DPR:$src))), (v8i8 DPR:$src)>;
5163 def : Pat<(f64 (bitconvert (v1i64 DPR:$src))), (f64 DPR:$src)>;
5164 def : Pat<(f64 (bitconvert (v2i32 DPR:$src))), (f64 DPR:$src)>;
5165 def : Pat<(f64 (bitconvert (v4i16 DPR:$src))), (f64 DPR:$src)>;
5166 def : Pat<(f64 (bitconvert (v8i8 DPR:$src))), (f64 DPR:$src)>;
5167 def : Pat<(f64 (bitconvert (v2f32 DPR:$src))), (f64 DPR:$src)>;
5168 def : Pat<(v2f32 (bitconvert (f64 DPR:$src))), (v2f32 DPR:$src)>;
5169 def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
5170 def : Pat<(v2f32 (bitconvert (v2i32 DPR:$src))), (v2f32 DPR:$src)>;
5171 def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
5172 def : Pat<(v2f32 (bitconvert (v8i8 DPR:$src))), (v2f32 DPR:$src)>;
// Likewise for the 128-bit types held in a Q register.
5174 def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
5175 def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
5176 def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
5177 def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
5178 def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
5179 def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
5180 def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
5181 def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
5182 def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
5183 def : Pat<(v4i32 (bitconvert (v4f32 QPR:$src))), (v4i32 QPR:$src)>;
5184 def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
5185 def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
5186 def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
5187 def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
5188 def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
5189 def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
5190 def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
5191 def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
5192 def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
5193 def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
5194 def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
5195 def : Pat<(v4f32 (bitconvert (v4i32 QPR:$src))), (v4f32 QPR:$src)>;
5196 def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
5197 def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
5198 def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
5199 def : Pat<(v2f64 (bitconvert (v2i64 QPR:$src))), (v2f64 QPR:$src)>;
5200 def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
5201 def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
5202 def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
5203 def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
5206 //===----------------------------------------------------------------------===//
5207 // Assembler aliases
5210 // VAND/VEOR/VORR accept but do not require a type suffix.
// The VFPDTAnyInstAlias multiclass accepts any (or no) data-type suffix on
// the mnemonic and maps it to the suffix-less bitwise instruction.
5211 defm : VFPDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
5212 (VANDd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
5213 defm : VFPDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
5214 (VANDq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
5215 defm : VFPDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
5216 (VEORd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
5217 defm : VFPDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
5218 (VEORq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
5219 defm : VFPDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
5220 (VORRd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
5221 defm : VFPDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
5222 (VORRq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
5224 // VLD1 requires a size suffix, but also accepts type specific variants.
5225 // Load one D register.
// Each VFPDT*ReqInstAlias accepts the element-size-specific type suffixes
// (e.g. ".s8"/".u8" for the 8-bit form) in addition to the canonical ".8".
5226 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5227 (VLD1d8 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
5228 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5229 (VLD1d16 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
5230 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5231 (VLD1d32 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
5232 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5233 (VLD1d64 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
5234 // with writeback, fixed stride
// zero_reg fills the tied writeback operand in the _fixed forms.
5235 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5236 (VLD1d8wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5237 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5238 (VLD1d16wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5239 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5240 (VLD1d32wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5241 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5242 (VLD1d64wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5243 // with writeback, register stride
5244 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5245 (VLD1d8wb_register VecListOneD:$Vd, zero_reg, addrmode6:$Rn,
5246 rGPR:$Rm, pred:$p)>;
5247 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5248 (VLD1d16wb_register VecListOneD:$Vd, zero_reg, addrmode6:$Rn,
5249 rGPR:$Rm, pred:$p)>;
5250 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5251 (VLD1d32wb_register VecListOneD:$Vd, zero_reg, addrmode6:$Rn,
5252 rGPR:$Rm, pred:$p)>;
5253 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5254 (VLD1d64wb_register VecListOneD:$Vd, zero_reg, addrmode6:$Rn,
5255 rGPR:$Rm, pred:$p)>;
5257 // Load two D registers.
// Same alias scheme as the one-register forms, targeting the VLD1q*
// (two-consecutive-D-register) instructions via VecListTwoD.
5258 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5259 (VLD1q8 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
5260 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5261 (VLD1q16 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
5262 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5263 (VLD1q32 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
5264 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5265 (VLD1q64 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
5266 // with writeback, fixed stride
5267 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5268 (VLD1q8wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5269 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5270 (VLD1q16wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5271 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5272 (VLD1q32wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5273 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5274 (VLD1q64wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5275 // with writeback, register stride
5276 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5277 (VLD1q8wb_register VecListTwoD:$Vd, zero_reg, addrmode6:$Rn,
5278 rGPR:$Rm, pred:$p)>;
5279 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5280 (VLD1q16wb_register VecListTwoD:$Vd, zero_reg, addrmode6:$Rn,
5281 rGPR:$Rm, pred:$p)>;
5282 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5283 (VLD1q32wb_register VecListTwoD:$Vd, zero_reg, addrmode6:$Rn,
5284 rGPR:$Rm, pred:$p)>;
5285 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5286 (VLD1q64wb_register VecListTwoD:$Vd, zero_reg, addrmode6:$Rn,
5287 rGPR:$Rm, pred:$p)>;
5289 // Load three D registers.
// Three-register VLD1 aliases, targeting the VLD1d*T instructions via
// VecListThreeD.
5290 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5291 (VLD1d8T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
5292 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5293 (VLD1d16T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
5294 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5295 (VLD1d32T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
5296 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5297 (VLD1d64T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
5298 // with writeback, fixed stride
5299 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5300 (VLD1d8Twb_fixed VecListThreeD:$Vd, zero_reg,
5301 addrmode6:$Rn, pred:$p)>;
5302 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5303 (VLD1d16Twb_fixed VecListThreeD:$Vd, zero_reg,
5304 addrmode6:$Rn, pred:$p)>;
5305 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5306 (VLD1d32Twb_fixed VecListThreeD:$Vd, zero_reg,
5307 addrmode6:$Rn, pred:$p)>;
5308 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5309 (VLD1d64Twb_fixed VecListThreeD:$Vd, zero_reg,
5310 addrmode6:$Rn, pred:$p)>;
5311 // with writeback, register stride
5312 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5313 (VLD1d8Twb_register VecListThreeD:$Vd, zero_reg,
5314 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5315 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5316 (VLD1d16Twb_register VecListThreeD:$Vd, zero_reg,
5317 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5318 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5319 (VLD1d32Twb_register VecListThreeD:$Vd, zero_reg,
5320 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5321 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5322 (VLD1d64Twb_register VecListThreeD:$Vd, zero_reg,
5323 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5326 // Load four D registers.
// Four-register VLD1 aliases, targeting the VLD1d*Q instructions via
// VecListFourD.
5327 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5328 (VLD1d8Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
5329 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5330 (VLD1d16Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
5331 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5332 (VLD1d32Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
5333 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5334 (VLD1d64Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
5335 // with writeback, fixed stride
5336 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5337 (VLD1d8Qwb_fixed VecListFourD:$Vd, zero_reg,
5338 addrmode6:$Rn, pred:$p)>;
5339 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5340 (VLD1d16Qwb_fixed VecListFourD:$Vd, zero_reg,
5341 addrmode6:$Rn, pred:$p)>;
5342 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5343 (VLD1d32Qwb_fixed VecListFourD:$Vd, zero_reg,
5344 addrmode6:$Rn, pred:$p)>;
5345 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5346 (VLD1d64Qwb_fixed VecListFourD:$Vd, zero_reg,
5347 addrmode6:$Rn, pred:$p)>;
5348 // with writeback, register stride
5349 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5350 (VLD1d8Qwb_register VecListFourD:$Vd, zero_reg,
5351 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5352 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5353 (VLD1d16Qwb_register VecListFourD:$Vd, zero_reg,
5354 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5355 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5356 (VLD1d32Qwb_register VecListFourD:$Vd, zero_reg,
5357 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5358 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5359 (VLD1d64Qwb_register VecListFourD:$Vd, zero_reg,
5360 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5362 // VST1 requires a size suffix, but also accepts type specific variants.
5363 // Store one D register.
// Mirror of the VLD1 aliases above; note the store instructions take the
// address operand first and the register list second.
5364 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5365 (VST1d8 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5366 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5367 (VST1d16 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5368 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5369 (VST1d32 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5370 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5371 (VST1d64 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5372 // with writeback, fixed stride
5373 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5374 (VST1d8wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5375 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5376 (VST1d16wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5377 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5378 (VST1d32wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5379 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5380 (VST1d64wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5381 // with writeback, register stride
5382 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5383 (VST1d8wb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5384 VecListOneD:$Vd, pred:$p)>;
5385 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5386 (VST1d16wb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5387 VecListOneD:$Vd, pred:$p)>;
5388 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5389 (VST1d32wb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5390 VecListOneD:$Vd, pred:$p)>;
5391 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5392 (VST1d64wb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5393 VecListOneD:$Vd, pred:$p)>;
5395 // Store two D registers.
// Two-register VST1 aliases, targeting VST1q* via VecListTwoD.
5396 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5397 (VST1q8 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5398 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5399 (VST1q16 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5400 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5401 (VST1q32 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5402 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5403 (VST1q64 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5404 // with writeback, fixed stride
5405 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5406 (VST1q8wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5407 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5408 (VST1q16wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5409 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5410 (VST1q32wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5411 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5412 (VST1q64wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5413 // with writeback, register stride
5414 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5415 (VST1q8wb_register zero_reg, addrmode6:$Rn,
5416 rGPR:$Rm, VecListTwoD:$Vd, pred:$p)>;
5417 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5418 (VST1q16wb_register zero_reg, addrmode6:$Rn,
5419 rGPR:$Rm, VecListTwoD:$Vd, pred:$p)>;
5420 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5421 (VST1q32wb_register zero_reg, addrmode6:$Rn,
5422 rGPR:$Rm, VecListTwoD:$Vd, pred:$p)>;
5423 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5424 (VST1q64wb_register zero_reg, addrmode6:$Rn,
5425 rGPR:$Rm, VecListTwoD:$Vd, pred:$p)>;
5427 // FIXME: The three and four register VST1 instructions haven't been moved
5428 // to the VecList* encoding yet, so we can't do assembly parsing support
5429 // for them. Uncomment these when that happens.
5430 // Store three D registers.
5431 //defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5432 // (VST1d8T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5433 //defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5434 // (VST1d16T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5435 //defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5436 // (VST1d32T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5437 //defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5438 // (VST1d64T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5440 // Store four D registers.
5441 //defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5442 // (VST1d8Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5443 //defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5444 // (VST1d16Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5445 //defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5446 // (VST1d32Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5447 //defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5448 // (VST1d64Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5451 // VTRN instructions data type suffix aliases for more-specific types.
// Only the element size matters for VTRN's encoding, so any same-width
// type suffix (e.g. ".s8"/".u8"/".i8") is accepted and mapped to the
// size-only instruction. VFPDT<N>ReqInstAlias (defined earlier) presumably
// expands one alias per such suffix.
// 64-bit (D register) forms.
5452 defm : VFPDT8ReqInstAlias <"vtrn${p}", "$Dd, $Dm",
5453 (VTRNd8 DPR:$Dd, DPR:$Dm, pred:$p)>;
5454 defm : VFPDT16ReqInstAlias<"vtrn${p}", "$Dd, $Dm",
5455 (VTRNd16 DPR:$Dd, DPR:$Dm, pred:$p)>;
5456 defm : VFPDT32ReqInstAlias<"vtrn${p}", "$Dd, $Dm",
5457 (VTRNd32 DPR:$Dd, DPR:$Dm, pred:$p)>;
// 128-bit (Q register) forms.
5459 defm : VFPDT8ReqInstAlias <"vtrn${p}", "$Qd, $Qm",
5460 (VTRNq8 QPR:$Qd, QPR:$Qm, pred:$p)>;
5461 defm : VFPDT16ReqInstAlias<"vtrn${p}", "$Qd, $Qm",
5462 (VTRNq16 QPR:$Qd, QPR:$Qm, pred:$p)>;
5463 defm : VFPDT32ReqInstAlias<"vtrn${p}", "$Qd, $Qm",
5464 (VTRNq32 QPR:$Qd, QPR:$Qm, pred:$p)>;