1 //===- ARM64InstrInfo.td - Describe the ARM64 Instructions -*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // ARM64 Instruction definitions.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // ARM64 Instruction Predicate Definitions.
17 def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
18 AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
19 def HasNEON : Predicate<"Subtarget->hasNEON()">,
20 AssemblerPredicate<"FeatureNEON", "neon">;
21 def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
22 AssemblerPredicate<"FeatureCrypto", "crypto">;
23 def HasCRC : Predicate<"Subtarget->hasCRC()">,
24 AssemblerPredicate<"FeatureCRC", "crc">;
25 def IsLE : Predicate<"Subtarget->isLittleEndian()">;
26 def IsBE : Predicate<"!Subtarget->isLittleEndian()">;
28 //===----------------------------------------------------------------------===//
29 // ARM64-specific DAG Nodes.
32 // SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
33 def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
36 SDTCisInt<0>, SDTCisVT<1, i32>]>;
38 // SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
39 def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
45 // SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
46 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
53 def SDT_ARM64Brcond : SDTypeProfile<0, 3,
54 [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
56 def SDT_ARM64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
57 def SDT_ARM64tbz : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisVT<1, i64>,
58 SDTCisVT<2, OtherVT>]>;
61 def SDT_ARM64CSel : SDTypeProfile<1, 4,
66 def SDT_ARM64FCmp : SDTypeProfile<0, 2,
69 def SDT_ARM64Dup : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
70 def SDT_ARM64DupLane : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
71 def SDT_ARM64Zip : SDTypeProfile<1, 2, [SDTCisVec<0>,
74 def SDT_ARM64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
75 def SDT_ARM64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
76 def SDT_ARM64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
77 SDTCisInt<2>, SDTCisInt<3>]>;
78 def SDT_ARM64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
79 def SDT_ARM64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
80 SDTCisSameAs<0,2>, SDTCisInt<3>]>;
81 def SDT_ARM64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
83 def SDT_ARM64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
84 def SDT_ARM64fcmpz : SDTypeProfile<1, 1, []>;
85 def SDT_ARM64fcmp : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
86 def SDT_ARM64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
88 def SDT_ARM64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
91 def SDT_ARM64TCRET : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
92 def SDT_ARM64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;
94 def SDT_ARM64ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;
96 def SDT_ARM64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
98 def SDT_ARM64WrapperLarge : SDTypeProfile<1, 4,
99 [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
100 SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
101 SDTCisSameAs<1, 4>]>;
105 def ARM64adrp : SDNode<"ARM64ISD::ADRP", SDTIntUnaryOp, []>;
106 def ARM64addlow : SDNode<"ARM64ISD::ADDlow", SDTIntBinOp, []>;
107 def ARM64LOADgot : SDNode<"ARM64ISD::LOADgot", SDTIntUnaryOp>;
108 def ARM64callseq_start : SDNode<"ISD::CALLSEQ_START",
109 SDCallSeqStart<[ SDTCisVT<0, i32> ]>,
110 [SDNPHasChain, SDNPOutGlue]>;
111 def ARM64callseq_end : SDNode<"ISD::CALLSEQ_END",
112 SDCallSeqEnd<[ SDTCisVT<0, i32>,
114 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
115 def ARM64call : SDNode<"ARM64ISD::CALL",
116 SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
117 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
119 def ARM64brcond : SDNode<"ARM64ISD::BRCOND", SDT_ARM64Brcond,
121 def ARM64cbz : SDNode<"ARM64ISD::CBZ", SDT_ARM64cbz,
123 def ARM64cbnz : SDNode<"ARM64ISD::CBNZ", SDT_ARM64cbz,
125 def ARM64tbz : SDNode<"ARM64ISD::TBZ", SDT_ARM64tbz,
127 def ARM64tbnz : SDNode<"ARM64ISD::TBNZ", SDT_ARM64tbz,
131 def ARM64csel : SDNode<"ARM64ISD::CSEL", SDT_ARM64CSel>;
132 def ARM64csinv : SDNode<"ARM64ISD::CSINV", SDT_ARM64CSel>;
133 def ARM64csneg : SDNode<"ARM64ISD::CSNEG", SDT_ARM64CSel>;
134 def ARM64csinc : SDNode<"ARM64ISD::CSINC", SDT_ARM64CSel>;
135 def ARM64retflag : SDNode<"ARM64ISD::RET_FLAG", SDTNone,
136 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
137 def ARM64adc : SDNode<"ARM64ISD::ADC", SDTBinaryArithWithFlagsIn >;
138 def ARM64sbc : SDNode<"ARM64ISD::SBC", SDTBinaryArithWithFlagsIn>;
139 def ARM64add_flag : SDNode<"ARM64ISD::ADDS", SDTBinaryArithWithFlagsOut,
141 def ARM64sub_flag : SDNode<"ARM64ISD::SUBS", SDTBinaryArithWithFlagsOut>;
142 def ARM64and_flag : SDNode<"ARM64ISD::ANDS", SDTBinaryArithWithFlagsOut,
144 def ARM64adc_flag : SDNode<"ARM64ISD::ADCS", SDTBinaryArithWithFlagsInOut>;
145 def ARM64sbc_flag : SDNode<"ARM64ISD::SBCS", SDTBinaryArithWithFlagsInOut>;
147 def ARM64threadpointer : SDNode<"ARM64ISD::THREAD_POINTER", SDTPtrLeaf>;
149 def ARM64fcmp : SDNode<"ARM64ISD::FCMP", SDT_ARM64FCmp>;
151 def ARM64fmax : SDNode<"ARM64ISD::FMAX", SDTFPBinOp>;
152 def ARM64fmin : SDNode<"ARM64ISD::FMIN", SDTFPBinOp>;
154 def ARM64dup : SDNode<"ARM64ISD::DUP", SDT_ARM64Dup>;
155 def ARM64duplane8 : SDNode<"ARM64ISD::DUPLANE8", SDT_ARM64DupLane>;
156 def ARM64duplane16 : SDNode<"ARM64ISD::DUPLANE16", SDT_ARM64DupLane>;
157 def ARM64duplane32 : SDNode<"ARM64ISD::DUPLANE32", SDT_ARM64DupLane>;
158 def ARM64duplane64 : SDNode<"ARM64ISD::DUPLANE64", SDT_ARM64DupLane>;
160 def ARM64zip1 : SDNode<"ARM64ISD::ZIP1", SDT_ARM64Zip>;
161 def ARM64zip2 : SDNode<"ARM64ISD::ZIP2", SDT_ARM64Zip>;
162 def ARM64uzp1 : SDNode<"ARM64ISD::UZP1", SDT_ARM64Zip>;
163 def ARM64uzp2 : SDNode<"ARM64ISD::UZP2", SDT_ARM64Zip>;
164 def ARM64trn1 : SDNode<"ARM64ISD::TRN1", SDT_ARM64Zip>;
165 def ARM64trn2 : SDNode<"ARM64ISD::TRN2", SDT_ARM64Zip>;
167 def ARM64movi_edit : SDNode<"ARM64ISD::MOVIedit", SDT_ARM64MOVIedit>;
168 def ARM64movi_shift : SDNode<"ARM64ISD::MOVIshift", SDT_ARM64MOVIshift>;
169 def ARM64movi_msl : SDNode<"ARM64ISD::MOVImsl", SDT_ARM64MOVIshift>;
170 def ARM64mvni_shift : SDNode<"ARM64ISD::MVNIshift", SDT_ARM64MOVIshift>;
171 def ARM64mvni_msl : SDNode<"ARM64ISD::MVNImsl", SDT_ARM64MOVIshift>;
172 def ARM64movi : SDNode<"ARM64ISD::MOVI", SDT_ARM64MOVIedit>;
173 def ARM64fmov : SDNode<"ARM64ISD::FMOV", SDT_ARM64MOVIedit>;
175 def ARM64rev16 : SDNode<"ARM64ISD::REV16", SDT_ARM64UnaryVec>;
176 def ARM64rev32 : SDNode<"ARM64ISD::REV32", SDT_ARM64UnaryVec>;
177 def ARM64rev64 : SDNode<"ARM64ISD::REV64", SDT_ARM64UnaryVec>;
178 def ARM64ext : SDNode<"ARM64ISD::EXT", SDT_ARM64ExtVec>;
180 def ARM64vashr : SDNode<"ARM64ISD::VASHR", SDT_ARM64vshift>;
181 def ARM64vlshr : SDNode<"ARM64ISD::VLSHR", SDT_ARM64vshift>;
182 def ARM64vshl : SDNode<"ARM64ISD::VSHL", SDT_ARM64vshift>;
183 def ARM64sqshli : SDNode<"ARM64ISD::SQSHL_I", SDT_ARM64vshift>;
184 def ARM64uqshli : SDNode<"ARM64ISD::UQSHL_I", SDT_ARM64vshift>;
185 def ARM64sqshlui : SDNode<"ARM64ISD::SQSHLU_I", SDT_ARM64vshift>;
186 def ARM64srshri : SDNode<"ARM64ISD::SRSHR_I", SDT_ARM64vshift>;
187 def ARM64urshri : SDNode<"ARM64ISD::URSHR_I", SDT_ARM64vshift>;
189 def ARM64not: SDNode<"ARM64ISD::NOT", SDT_ARM64unvec>;
190 def ARM64bit: SDNode<"ARM64ISD::BIT", SDT_ARM64trivec>;
191 def ARM64bsl: SDNode<"ARM64ISD::BSL", SDT_ARM64trivec>;
193 def ARM64cmeq: SDNode<"ARM64ISD::CMEQ", SDT_ARM64binvec>;
194 def ARM64cmge: SDNode<"ARM64ISD::CMGE", SDT_ARM64binvec>;
195 def ARM64cmgt: SDNode<"ARM64ISD::CMGT", SDT_ARM64binvec>;
196 def ARM64cmhi: SDNode<"ARM64ISD::CMHI", SDT_ARM64binvec>;
197 def ARM64cmhs: SDNode<"ARM64ISD::CMHS", SDT_ARM64binvec>;
199 def ARM64fcmeq: SDNode<"ARM64ISD::FCMEQ", SDT_ARM64fcmp>;
200 def ARM64fcmge: SDNode<"ARM64ISD::FCMGE", SDT_ARM64fcmp>;
201 def ARM64fcmgt: SDNode<"ARM64ISD::FCMGT", SDT_ARM64fcmp>;
203 def ARM64cmeqz: SDNode<"ARM64ISD::CMEQz", SDT_ARM64unvec>;
204 def ARM64cmgez: SDNode<"ARM64ISD::CMGEz", SDT_ARM64unvec>;
205 def ARM64cmgtz: SDNode<"ARM64ISD::CMGTz", SDT_ARM64unvec>;
206 def ARM64cmlez: SDNode<"ARM64ISD::CMLEz", SDT_ARM64unvec>;
207 def ARM64cmltz: SDNode<"ARM64ISD::CMLTz", SDT_ARM64unvec>;
208 def ARM64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
209 (ARM64not (ARM64cmeqz (and node:$LHS, node:$RHS)))>;
211 def ARM64fcmeqz: SDNode<"ARM64ISD::FCMEQz", SDT_ARM64fcmpz>;
212 def ARM64fcmgez: SDNode<"ARM64ISD::FCMGEz", SDT_ARM64fcmpz>;
213 def ARM64fcmgtz: SDNode<"ARM64ISD::FCMGTz", SDT_ARM64fcmpz>;
214 def ARM64fcmlez: SDNode<"ARM64ISD::FCMLEz", SDT_ARM64fcmpz>;
215 def ARM64fcmltz: SDNode<"ARM64ISD::FCMLTz", SDT_ARM64fcmpz>;
217 def ARM64bici: SDNode<"ARM64ISD::BICi", SDT_ARM64vecimm>;
218 def ARM64orri: SDNode<"ARM64ISD::ORRi", SDT_ARM64vecimm>;
220 def ARM64neg : SDNode<"ARM64ISD::NEG", SDT_ARM64unvec>;
222 def ARM64tcret: SDNode<"ARM64ISD::TC_RETURN", SDT_ARM64TCRET,
223 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
225 def ARM64Prefetch : SDNode<"ARM64ISD::PREFETCH", SDT_ARM64PREFETCH,
226 [SDNPHasChain, SDNPSideEffect]>;
228 def ARM64sitof: SDNode<"ARM64ISD::SITOF", SDT_ARM64ITOF>;
229 def ARM64uitof: SDNode<"ARM64ISD::UITOF", SDT_ARM64ITOF>;
231 def ARM64tlsdesc_call : SDNode<"ARM64ISD::TLSDESC_CALL", SDT_ARM64TLSDescCall,
232 [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
235 def ARM64WrapperLarge : SDNode<"ARM64ISD::WrapperLarge", SDT_ARM64WrapperLarge>;
238 //===----------------------------------------------------------------------===//
240 //===----------------------------------------------------------------------===//
242 // ARM64 Instruction Predicate Definitions.
244 def HasZCZ : Predicate<"Subtarget->hasZeroCycleZeroing()">;
245 def NoZCZ : Predicate<"!Subtarget->hasZeroCycleZeroing()">;
246 def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
247 def IsNotDarwin: Predicate<"!Subtarget->isTargetDarwin()">;
248 def ForCodeSize : Predicate<"ForCodeSize">;
249 def NotForCodeSize : Predicate<"!ForCodeSize">;
251 include "ARM64InstrFormats.td"
253 //===----------------------------------------------------------------------===//
255 //===----------------------------------------------------------------------===//
256 // Miscellaneous instructions.
257 //===----------------------------------------------------------------------===//
259 let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
260 def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
261 [(ARM64callseq_start timm:$amt)]>;
262 def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
263 [(ARM64callseq_end timm:$amt1, timm:$amt2)]>;
264 } // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
266 let isReMaterializable = 1, isCodeGenOnly = 1 in {
267 // FIXME: The following pseudo instructions are only needed because remat
268 // cannot handle multiple instructions. When that changes, they can be
269 // removed, along with the ARM64Wrapper node.
271 let AddedComplexity = 10 in
272 def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
273 [(set GPR64:$dst, (ARM64LOADgot tglobaladdr:$addr))]>,
276 // The MOVaddr instruction should match only when the add is not folded
277 // into a load or store address.
279 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
280 [(set GPR64:$dst, (ARM64addlow (ARM64adrp tglobaladdr:$hi),
281 tglobaladdr:$low))]>,
282 Sched<[WriteAdrAdr]>;
284 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
285 [(set GPR64:$dst, (ARM64addlow (ARM64adrp tjumptable:$hi),
287 Sched<[WriteAdrAdr]>;
289 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
290 [(set GPR64:$dst, (ARM64addlow (ARM64adrp tconstpool:$hi),
292 Sched<[WriteAdrAdr]>;
294 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
295 [(set GPR64:$dst, (ARM64addlow (ARM64adrp tblockaddress:$hi),
296 tblockaddress:$low))]>,
297 Sched<[WriteAdrAdr]>;
299 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
300 [(set GPR64:$dst, (ARM64addlow (ARM64adrp tglobaltlsaddr:$hi),
301 tglobaltlsaddr:$low))]>,
302 Sched<[WriteAdrAdr]>;
304 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
305 [(set GPR64:$dst, (ARM64addlow (ARM64adrp texternalsym:$hi),
306 texternalsym:$low))]>,
307 Sched<[WriteAdrAdr]>;
309 } // isReMaterializable, isCodeGenOnly
311 def : Pat<(ARM64LOADgot tglobaltlsaddr:$addr),
312 (LOADgot tglobaltlsaddr:$addr)>;
314 def : Pat<(ARM64LOADgot texternalsym:$addr),
315 (LOADgot texternalsym:$addr)>;
317 def : Pat<(ARM64LOADgot tconstpool:$addr),
318 (LOADgot tconstpool:$addr)>;
320 //===----------------------------------------------------------------------===//
321 // System instructions.
322 //===----------------------------------------------------------------------===//
324 def HINT : HintI<"hint">;
325 def : InstAlias<"nop", (HINT 0b000)>;
326 def : InstAlias<"yield",(HINT 0b001)>;
327 def : InstAlias<"wfe", (HINT 0b010)>;
328 def : InstAlias<"wfi", (HINT 0b011)>;
329 def : InstAlias<"sev", (HINT 0b100)>;
330 def : InstAlias<"sevl", (HINT 0b101)>;
332 // As far as LLVM is concerned this writes to the system's exclusive monitors.
333 let mayLoad = 1, mayStore = 1 in
334 def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;
336 def DMB : CRmSystemI<barrier_op, 0b101, "dmb">;
337 def DSB : CRmSystemI<barrier_op, 0b100, "dsb">;
338 def ISB : CRmSystemI<barrier_op, 0b110, "isb">;
339 def : InstAlias<"clrex", (CLREX 0xf)>;
340 def : InstAlias<"isb", (ISB 0xf)>;
344 def MSRpstate: MSRpstateI;
346 // The thread pointer (on Linux, at least, where this has been implemented) is TPIDR_EL0.
348 def : Pat<(ARM64threadpointer), (MRS 0xde82)>;
350 // Generic system instructions
351 def SYSxt : SystemXtI<0, "sys">;
352 def SYSLxt : SystemLXtI<1, "sysl">;
354 def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
355 (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
356 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
358 //===----------------------------------------------------------------------===//
359 // Move immediate instructions.
360 //===----------------------------------------------------------------------===//
362 defm MOVK : InsertImmediate<0b11, "movk">;
363 defm MOVN : MoveImmediate<0b00, "movn">;
365 let PostEncoderMethod = "fixMOVZ" in
366 defm MOVZ : MoveImmediate<0b10, "movz">;
368 // First group of aliases covers an implicit "lsl #0".
369 def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0)>;
370 def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0)>;
371 def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
372 def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
373 def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
374 def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, imm0_65535:$imm, 0)>;
376 // Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
377 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
378 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
379 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
380 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
382 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
383 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
384 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
385 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
387 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48)>;
388 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32)>;
389 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16)>;
390 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0)>;
392 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
393 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
395 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
396 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
398 def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16)>;
399 def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0)>;
401 // Final group of aliases covers true "mov $Rd, $imm" cases.
402 multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
403 int width, int shift> {
404 def _asmoperand : AsmOperandClass {
405 let Name = basename # width # "_lsl" # shift # "MovAlias";
406 let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
408 let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
411 def _movimm : Operand<i32> {
412 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
415 def : InstAlias<"mov $Rd, $imm",
416 (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
419 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
420 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
422 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
423 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
424 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
425 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
427 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
428 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
430 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
431 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
432 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
433 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
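// For illustration (using the standard MOVZ/MOVN encodings): "mov w0, #0x10000"
// matches the 32-bit MOVZ lsl#16 alias and assembles as "movz w0, #1, lsl #16",
// while "mov w0, #-2" matches the MOVN alias and assembles as "movn w0, #1".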
435 let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
436 isAsCheapAsAMove = 1 in {
437 // FIXME: The following pseudo instructions are only needed because remat
438 // cannot handle multiple instructions. When that changes, we can select
439 // directly to the real instructions and get rid of these pseudos.
442 : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
443 [(set GPR32:$dst, imm:$src)]>,
446 : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
447 [(set GPR64:$dst, imm:$src)]>,
449 } // isReMaterializable, isCodeGenOnly
451 // If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
452 // eventual expansion code fewer bits to worry about getting right. Marshalling
453 // the types is a little tricky though:
454 def i64imm_32bit : ImmLeaf<i64, [{
455 return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
458 def trunc_imm : SDNodeXForm<imm, [{
459 return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i32);
462 def : Pat<(i64 i64imm_32bit:$src),
463 (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
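// For example, a 64-bit constant such as 0x00000000abcdef12 has its top 32 bits
// clear, so it can be built as a MOVi32imm into the W sub-register and implicitly
// zero-extended via SUBREG_TO_REG, rather than needing a full 64-bit MOVZ/MOVK chain.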
465 // Deal with the various forms of (ELF) large addressing with MOVZ/MOVK sequences.
467 def : Pat<(ARM64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
468 tglobaladdr:$g1, tglobaladdr:$g0),
469 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g3, 48),
470 tglobaladdr:$g2, 32),
471 tglobaladdr:$g1, 16),
472 tglobaladdr:$g0, 0)>;
474 def : Pat<(ARM64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
475 tblockaddress:$g1, tblockaddress:$g0),
476 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g3, 48),
477 tblockaddress:$g2, 32),
478 tblockaddress:$g1, 16),
479 tblockaddress:$g0, 0)>;
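// Each of these WrapperLarge patterns expands to roughly the following sequence
// (shown for a global symbol under the ELF large code model):
//   movz x0, #:abs_g3:sym
//   movk x0, #:abs_g2_nc:sym
//   movk x0, #:abs_g1_nc:sym
//   movk x0, #:abs_g0_nc:sym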
481 def : Pat<(ARM64WrapperLarge tconstpool:$g3, tconstpool:$g2,
482 tconstpool:$g1, tconstpool:$g0),
483 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g3, 48),
488 def : Pat<(ARM64WrapperLarge tjumptable:$g3, tjumptable:$g2,
489 tjumptable:$g1, tjumptable:$g0),
490 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g3, 48),
496 //===----------------------------------------------------------------------===//
497 // Arithmetic instructions.
498 //===----------------------------------------------------------------------===//
500 // Add/subtract with carry.
501 defm ADC : AddSubCarry<0, "adc", "adcs", ARM64adc, ARM64adc_flag>;
502 defm SBC : AddSubCarry<1, "sbc", "sbcs", ARM64sbc, ARM64sbc_flag>;
504 def : InstAlias<"ngc $dst, $src", (SBCWr GPR32:$dst, WZR, GPR32:$src)>;
505 def : InstAlias<"ngc $dst, $src", (SBCXr GPR64:$dst, XZR, GPR64:$src)>;
506 def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
507 def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
510 defm ADD : AddSub<0, "add", add>;
511 defm SUB : AddSub<1, "sub">;
513 def : InstAlias<"mov $dst, $src",
514 (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
515 def : InstAlias<"mov $dst, $src",
516 (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
517 def : InstAlias<"mov $dst, $src",
518 (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
519 def : InstAlias<"mov $dst, $src",
520 (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
522 defm ADDS : AddSubS<0, "adds", ARM64add_flag, "cmn">;
523 defm SUBS : AddSubS<1, "subs", ARM64sub_flag, "cmp">;
525 // Use SUBS instead of SUB to enable CSE between SUBS and SUB.
526 def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
527 (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
528 def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
529 (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
530 def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
531 (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
532 def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
533 (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
534 def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
535 (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
536 def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
537 (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
538 def : Pat<(sub GPR32sp:$R2, arith_extended_reg32<i32>:$R3),
539 (SUBSWrx GPR32sp:$R2, arith_extended_reg32<i32>:$R3)>;
540 def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3),
541 (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3)>;
543 // Because of the immediate format for add/sub-imm instructions, the
544 // expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
545 // These patterns capture that transformation.
546 let AddedComplexity = 1 in {
547 def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
548 (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
549 def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
550 (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
551 def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
552 (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
553 def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
554 (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
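// For example, "add w0, w1, #-1" has no encoding (add/sub immediates are unsigned
// 12-bit values), so the add of a negative immediate above is selected as a
// subtract of #1 (the flag-setting SUBS form, per the CSE note earlier).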
557 def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0)>;
558 def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0)>;
559 def : InstAlias<"neg $dst, $src, $shift",
560 (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift)>;
561 def : InstAlias<"neg $dst, $src, $shift",
562 (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift)>;
564 // Because of the immediate format for add/sub-imm instructions, the
565 // expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
566 // These patterns capture that transformation.
567 let AddedComplexity = 1 in {
568 def : Pat<(ARM64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
569 (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
570 def : Pat<(ARM64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
571 (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
572 def : Pat<(ARM64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
573 (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
574 def : Pat<(ARM64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
575 (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
578 def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0)>;
579 def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0)>;
580 def : InstAlias<"negs $dst, $src, $shift",
581 (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift)>;
582 def : InstAlias<"negs $dst, $src, $shift",
583 (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift)>;
585 // Unsigned/Signed divide
586 defm UDIV : Div<0, "udiv", udiv>;
587 defm SDIV : Div<1, "sdiv", sdiv>;
588 let isCodeGenOnly = 1 in {
589 defm UDIV_Int : Div<0, "udiv", int_arm64_udiv>;
590 defm SDIV_Int : Div<1, "sdiv", int_arm64_sdiv>;
594 defm ASRV : Shift<0b10, "asr", sra>;
595 defm LSLV : Shift<0b00, "lsl", shl>;
596 defm LSRV : Shift<0b01, "lsr", srl>;
597 defm RORV : Shift<0b11, "ror", rotr>;
599 def : ShiftAlias<"asrv", ASRVWr, GPR32>;
600 def : ShiftAlias<"asrv", ASRVXr, GPR64>;
601 def : ShiftAlias<"lslv", LSLVWr, GPR32>;
602 def : ShiftAlias<"lslv", LSLVXr, GPR64>;
603 def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
604 def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
605 def : ShiftAlias<"rorv", RORVWr, GPR32>;
606 def : ShiftAlias<"rorv", RORVXr, GPR64>;
609 let AddedComplexity = 7 in {
610 defm MADD : MulAccum<0, "madd", add>;
611 defm MSUB : MulAccum<1, "msub", sub>;
613 def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
614 (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
615 def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
616 (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
618 def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
619 (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
620 def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
621 (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
622 } // AddedComplexity = 7
624 let AddedComplexity = 5 in {
625 def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
626 def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
627 def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
628 def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
630 def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
631 (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
632 def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
633 (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
635 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
636 (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
637 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
638 (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
639 } // AddedComplexity = 5
641 def : MulAccumWAlias<"mul", MADDWrrr>;
642 def : MulAccumXAlias<"mul", MADDXrrr>;
643 def : MulAccumWAlias<"mneg", MSUBWrrr>;
644 def : MulAccumXAlias<"mneg", MSUBXrrr>;
645 def : WideMulAccumAlias<"smull", SMADDLrrr>;
646 def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
647 def : WideMulAccumAlias<"umull", UMADDLrrr>;
648 def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
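// E.g. "mul x0, x1, x2" is simply "madd x0, x1, x2, xzr", and
// "smull x0, w1, w2" is "smaddl x0, w1, w2, xzr".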
651 def SMULHrr : MulHi<0b010, "smulh", mulhs>;
652 def UMULHrr : MulHi<0b110, "umulh", mulhu>;
655 def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_arm64_crc32b, "crc32b">;
656 def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_arm64_crc32h, "crc32h">;
657 def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_arm64_crc32w, "crc32w">;
658 def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_arm64_crc32x, "crc32x">;
660 def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_arm64_crc32cb, "crc32cb">;
661 def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_arm64_crc32ch, "crc32ch">;
662 def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_arm64_crc32cw, "crc32cw">;
663 def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_arm64_crc32cx, "crc32cx">;
666 //===----------------------------------------------------------------------===//
667 // Logical instructions.
668 //===----------------------------------------------------------------------===//
671 defm ANDS : LogicalImmS<0b11, "ands", ARM64and_flag>;
672 defm AND : LogicalImm<0b00, "and", and>;
673 defm EOR : LogicalImm<0b10, "eor", xor>;
674 defm ORR : LogicalImm<0b01, "orr", or>;
676 def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
677 logical_imm32:$imm)>;
678 def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
679 logical_imm64:$imm)>;
683 defm ANDS : LogicalRegS<0b11, 0, "ands", ARM64and_flag>;
684 defm BICS : LogicalRegS<0b11, 1, "bics",
685 BinOpFrag<(ARM64and_flag node:$LHS, (not node:$RHS))>>;
686 defm AND : LogicalReg<0b00, 0, "and", and>;
687 defm BIC : LogicalReg<0b00, 1, "bic",
688 BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
689 defm EON : LogicalReg<0b10, 1, "eon",
690 BinOpFrag<(xor node:$LHS, (not node:$RHS))>>;
691 defm EOR : LogicalReg<0b10, 0, "eor", xor>;
692 defm ORN : LogicalReg<0b01, 1, "orn",
693 BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
694 defm ORR : LogicalReg<0b01, 0, "orr", or>;
696 def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0)>;
697 def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0)>;
699 def : InstAlias<"tst $src1, $src2",
700 (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2)>;
701 def : InstAlias<"tst $src1, $src2",
702 (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2)>;
704 def : InstAlias<"tst $src1, $src2",
705 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0)>;
706 def : InstAlias<"tst $src1, $src2",
707 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0)>;
709 def : InstAlias<"tst $src1, $src2, $sh",
710 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh)>;
711 def : InstAlias<"tst $src1, $src2, $sh",
712 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh)>;
714 def : InstAlias<"mvn $Wd, $Wm",
715 (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0)>;
716 def : InstAlias<"mvn $Xd, $Xm",
717 (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0)>;
719 def : InstAlias<"mvn $Wd, $Wm, $sh",
720 (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh)>;
721 def : InstAlias<"mvn $Xd, $Xm, $sh",
722 (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh)>;
724 def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
725 def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
728 //===----------------------------------------------------------------------===//
729 // One operand data processing instructions.
730 //===----------------------------------------------------------------------===//
732 defm CLS : OneOperandData<0b101, "cls">;
733 defm CLZ : OneOperandData<0b100, "clz", ctlz>;
734 defm RBIT : OneOperandData<0b000, "rbit">;
735 def REV16Wr : OneWRegData<0b001, "rev16",
736 UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
737 def REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
739 def : Pat<(cttz GPR32:$Rn),
740 (CLZWr (RBITWr GPR32:$Rn))>;
741 def : Pat<(cttz GPR64:$Rn),
742 (CLZXr (RBITXr GPR64:$Rn))>;
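// There is no dedicated count-trailing-zeros instruction; cttz is computed by
// bit-reversing the input and counting leading zeros, e.g. "rbit w0, w0; clz w0, w0".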
743 def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
746 def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
750 // Unlike the other one operand instructions, the instructions with the "rev"
751 // mnemonic do *not* just differ in the size bit, but actually use different
752 // opcode bits for the different sizes.
753 def REVWr : OneWRegData<0b010, "rev", bswap>;
754 def REVXr : OneXRegData<0b011, "rev", bswap>;
755 def REV32Xr : OneXRegData<0b010, "rev32",
756 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
758 // The bswap commutes with the rotr, so we want a pattern for both possible orderings.
760 def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
761 def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
763 //===----------------------------------------------------------------------===//
764 // Bitfield immediate extraction instruction.
765 //===----------------------------------------------------------------------===//
766 let neverHasSideEffects = 1 in
767 defm EXTR : ExtractImm<"extr">;
768 def : InstAlias<"ror $dst, $src, $shift",
769 (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
770 def : InstAlias<"ror $dst, $src, $shift",
771 (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
773 def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
774 (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
775 def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
776 (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
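// E.g. "ror w0, w1, #8" is just EXTR with both source operands equal:
// "extr w0, w1, w1, #8".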
778 //===----------------------------------------------------------------------===//
779 // Other bitfield immediate instructions.
780 //===----------------------------------------------------------------------===//
781 let neverHasSideEffects = 1 in {
782 defm BFM : BitfieldImmWith2RegArgs<0b01, "bfm">;
783 defm SBFM : BitfieldImm<0b00, "sbfm">;
784 defm UBFM : BitfieldImm<0b10, "ubfm">;
787 def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
788 uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
789 return CurDAG->getTargetConstant(enc, MVT::i64);
792 def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
793 uint64_t enc = 31 - N->getZExtValue();
794 return CurDAG->getTargetConstant(enc, MVT::i64);
797 // min(7, 31 - shift_amt)
798 def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
799 uint64_t enc = 31 - N->getZExtValue();
800 enc = enc > 7 ? 7 : enc;
801 return CurDAG->getTargetConstant(enc, MVT::i64);
804 // min(15, 31 - shift_amt)
805 def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
806 uint64_t enc = 31 - N->getZExtValue();
807 enc = enc > 15 ? 15 : enc;
808 return CurDAG->getTargetConstant(enc, MVT::i64);
811 def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
812 uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
813 return CurDAG->getTargetConstant(enc, MVT::i64);
816 def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
817 uint64_t enc = 63 - N->getZExtValue();
818 return CurDAG->getTargetConstant(enc, MVT::i64);
821 // min(7, 63 - shift_amt)
822 def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
823 uint64_t enc = 63 - N->getZExtValue();
824 enc = enc > 7 ? 7 : enc;
825 return CurDAG->getTargetConstant(enc, MVT::i64);
828 // min(15, 63 - shift_amt)
829 def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
830 uint64_t enc = 63 - N->getZExtValue();
831 enc = enc > 15 ? 15 : enc;
832 return CurDAG->getTargetConstant(enc, MVT::i64);
835 // min(31, 63 - shift_amt)
836 def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
837 uint64_t enc = 63 - N->getZExtValue();
838 enc = enc > 31 ? 31 : enc;
839 return CurDAG->getTargetConstant(enc, MVT::i64);
842 def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
843 (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
844 (i64 (i32shift_b imm0_31:$imm)))>;
845 def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
846 (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
847 (i64 (i64shift_b imm0_63:$imm)))>;
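// Worked example: for "shl w0, w1, #3", i32shift_a gives (32 - 3) & 0x1f = 29 and
// i32shift_b gives 31 - 3 = 28, so the pattern emits "ubfm w0, w1, #29, #28",
// which is the encoding of "lsl w0, w1, #3".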
849 let AddedComplexity = 10 in {
850 def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
851 (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
852 def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
853 (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
856 def : InstAlias<"asr $dst, $src, $shift",
857 (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
858 def : InstAlias<"asr $dst, $src, $shift",
859 (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
860 def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
861 def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
862 def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
863 def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
864 def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
866 def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
867 (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
868 def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
869 (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
871 def : InstAlias<"lsr $dst, $src, $shift",
872 (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
873 def : InstAlias<"lsr $dst, $src, $shift",
874 (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
875 def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
876 def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
877 def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
878 def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
879 def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
881 //===----------------------------------------------------------------------===//
882 // Conditionally set flags instructions.
883 //===----------------------------------------------------------------------===//
884 defm CCMN : CondSetFlagsImm<0, "ccmn">;
885 defm CCMP : CondSetFlagsImm<1, "ccmp">;
887 defm CCMN : CondSetFlagsReg<0, "ccmn">;
888 defm CCMP : CondSetFlagsReg<1, "ccmp">;
890 //===----------------------------------------------------------------------===//
891 // Conditional select instructions.
892 //===----------------------------------------------------------------------===//
893 defm CSEL : CondSelect<0, 0b00, "csel">;
895 def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
896 defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
897 defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
898 defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
900 def : Pat<(ARM64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
901 (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
902 def : Pat<(ARM64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
903 (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
904 def : Pat<(ARM64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
905 (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
906 def : Pat<(ARM64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
907 (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
908 def : Pat<(ARM64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
909 (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
910 def : Pat<(ARM64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
911 (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
913 def : Pat<(ARM64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
914 (CSINCWr WZR, WZR, (i32 imm:$cc))>;
915 def : Pat<(ARM64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
916 (CSINCXr XZR, XZR, (i32 imm:$cc))>;
917 def : Pat<(ARM64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
918 (CSINVWr WZR, WZR, (i32 imm:$cc))>;
919 def : Pat<(ARM64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
920 (CSINVXr XZR, XZR, (i32 imm:$cc))>;
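// A select between the constants 0 and 1 (or 0 and -1) needs no register inputs:
// CSINC (or CSINV) from WZR/XZR produces the value directly, which is what the
// cset/csetm aliases below spell out.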
922 // The inverse of the condition code from the alias instruction is what is used
923 // in the aliased instruction. The parser already inverts the condition code
924 // for these aliases.
925 // FIXME: Is this the correct way to handle these aliases?
926 def : InstAlias<"cset $dst, $cc", (CSINCWr GPR32:$dst, WZR, WZR, ccode:$cc)>;
927 def : InstAlias<"cset $dst, $cc", (CSINCXr GPR64:$dst, XZR, XZR, ccode:$cc)>;
929 def : InstAlias<"csetm $dst, $cc", (CSINVWr GPR32:$dst, WZR, WZR, ccode:$cc)>;
930 def : InstAlias<"csetm $dst, $cc", (CSINVXr GPR64:$dst, XZR, XZR, ccode:$cc)>;
932 def : InstAlias<"cinc $dst, $src, $cc",
933 (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, ccode:$cc)>;
934 def : InstAlias<"cinc $dst, $src, $cc",
935 (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, ccode:$cc)>;
937 def : InstAlias<"cinv $dst, $src, $cc",
938 (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, ccode:$cc)>;
939 def : InstAlias<"cinv $dst, $src, $cc",
940 (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, ccode:$cc)>;
942 def : InstAlias<"cneg $dst, $src, $cc",
943 (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, ccode:$cc)>;
944 def : InstAlias<"cneg $dst, $src, $cc",
945 (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, ccode:$cc)>;
947 //===----------------------------------------------------------------------===//
948 // PC-relative instructions.
949 //===----------------------------------------------------------------------===//
950 let isReMaterializable = 1 in {
951 let neverHasSideEffects = 1, mayStore = 0, mayLoad = 0 in {
952 def ADR : ADRI<0, "adr", adrlabel, []>;
953 } // neverHasSideEffects = 1
955 def ADRP : ADRI<1, "adrp", adrplabel,
956 [(set GPR64:$Xd, (ARM64adrp tglobaladdr:$label))]>;
957 } // isReMaterializable = 1
959 // page address of a constant pool entry, block address
960 def : Pat<(ARM64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
961 def : Pat<(ARM64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
963 //===----------------------------------------------------------------------===//
964 // Unconditional branch (register) instructions.
965 //===----------------------------------------------------------------------===//
967 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
968 def RET : BranchReg<0b0010, "ret", []>;
969 def DRPS : SpecialReturn<0b0101, "drps">;
970 def ERET : SpecialReturn<0b0100, "eret">;
971 } // isReturn = 1, isTerminator = 1, isBarrier = 1
973 // Default to the LR register.
974 def : InstAlias<"ret", (RET LR)>;
976 let isCall = 1, Defs = [LR], Uses = [SP] in {
977 def BLR : BranchReg<0b0001, "blr", [(ARM64call GPR64:$Rn)]>;
980 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
981 def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
982 } // isBranch, isTerminator, isBarrier, isIndirectBranch
984 // Create a separate pseudo-instruction for codegen to use so that we don't
985 // flag lr as used in every function. It'll be restored before the RET by the
986 // epilogue if it's legitimately used.
987 def RET_ReallyLR : Pseudo<(outs), (ins), [(ARM64retflag)]> {
988 let isTerminator = 1;
993 // This is a directive-like pseudo-instruction. The purpose is to insert an
994 // R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
995 // (which in the usual case is a BLR).
996 let hasSideEffects = 1 in
997 def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []> {
998 let AsmString = ".tlsdesccall $sym";
1001 // Pseudo-instruction representing a BLR with attached TLSDESC relocation. It
1002 // gets expanded to two MCInsts during lowering.
1003 let isCall = 1, Defs = [LR] in
1005 : Pseudo<(outs), (ins GPR64:$dest, i64imm:$sym),
1006 [(ARM64tlsdesc_call GPR64:$dest, tglobaltlsaddr:$sym)]>;
1008 def : Pat<(ARM64tlsdesc_call GPR64:$dest, texternalsym:$sym),
1009 (TLSDESC_BLR GPR64:$dest, texternalsym:$sym)>;
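// Roughly, a general-dynamic TLS access built from these nodes looks like:
//   adrp  x0, :tlsdesc:var
//   ldr   x1, [x0, :tlsdesc_lo12:var]
//   add   x0, x0, :tlsdesc_lo12:var
//   .tlsdesccall var
//   blr   x1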
1010 //===----------------------------------------------------------------------===//
1011 // Conditional branch (immediate) instruction.
1012 //===----------------------------------------------------------------------===//
1013 def Bcc : BranchCond;
1015 //===----------------------------------------------------------------------===//
1016 // Compare-and-branch instructions.
1017 //===----------------------------------------------------------------------===//
1018 defm CBZ : CmpBranch<0, "cbz", ARM64cbz>;
1019 defm CBNZ : CmpBranch<1, "cbnz", ARM64cbnz>;
1021 //===----------------------------------------------------------------------===//
1022 // Test-bit-and-branch instructions.
1023 //===----------------------------------------------------------------------===//
1024 def TBZ : TestBranch<0, "tbz", ARM64tbz>;
1025 def TBNZ : TestBranch<1, "tbnz", ARM64tbnz>;
1027 //===----------------------------------------------------------------------===//
1028 // Unconditional branch (immediate) instructions.
1029 //===----------------------------------------------------------------------===//
1030 let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
1031 def B : BranchImm<0, "b", [(br bb:$addr)]>;
1032 } // isBranch, isTerminator, isBarrier
1034 let isCall = 1, Defs = [LR], Uses = [SP] in {
1035 def BL : CallImm<1, "bl", [(ARM64call tglobaladdr:$addr)]>;
1037 def : Pat<(ARM64call texternalsym:$func), (BL texternalsym:$func)>;
1039 //===----------------------------------------------------------------------===//
1040 // Exception generation instructions.
1041 //===----------------------------------------------------------------------===//
1042 def BRK : ExceptionGeneration<0b001, 0b00, "brk">;
1043 def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
1044 def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
1045 def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
1046 def HLT : ExceptionGeneration<0b010, 0b00, "hlt">;
1047 def HVC : ExceptionGeneration<0b000, 0b10, "hvc">;
1048 def SMC : ExceptionGeneration<0b000, 0b11, "smc">;
1049 def SVC : ExceptionGeneration<0b000, 0b01, "svc">;
1051 // DCPSn defaults to an immediate operand of zero if unspecified.
1052 def : InstAlias<"dcps1", (DCPS1 0)>;
1053 def : InstAlias<"dcps2", (DCPS2 0)>;
1054 def : InstAlias<"dcps3", (DCPS3 0)>;
1056 //===----------------------------------------------------------------------===//
1057 // Load instructions.
1058 //===----------------------------------------------------------------------===//
1060 // Pair (indexed, offset)
1061 def LDPWi : LoadPairOffset<0b00, 0, GPR32, am_indexed32simm7, "ldp">;
1062 def LDPXi : LoadPairOffset<0b10, 0, GPR64, am_indexed64simm7, "ldp">;
1063 def LDPSi : LoadPairOffset<0b00, 1, FPR32, am_indexed32simm7, "ldp">;
1064 def LDPDi : LoadPairOffset<0b01, 1, FPR64, am_indexed64simm7, "ldp">;
1065 def LDPQi : LoadPairOffset<0b10, 1, FPR128, am_indexed128simm7, "ldp">;
1067 def LDPSWi : LoadPairOffset<0b01, 0, GPR64, am_indexed32simm7, "ldpsw">;
1069 // Pair (pre-indexed)
1070 def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32, am_indexed32simm7_wb, "ldp">;
1071 def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64, am_indexed64simm7_wb, "ldp">;
1072 def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32, am_indexed32simm7_wb, "ldp">;
1073 def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64, am_indexed64simm7_wb, "ldp">;
1074 def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128, am_indexed128simm7_wb, "ldp">;
1076 def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64, am_indexed32simm7_wb, "ldpsw">;
1078 // Pair (post-indexed)
1079 def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32, simm7s4, "ldp">;
1080 def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64, simm7s8, "ldp">;
1081 def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32, simm7s4, "ldp">;
1082 def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64, simm7s8, "ldp">;
1083 def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128, simm7s16, "ldp">;
1085 def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;
1088 // Pair (no allocate)
1089 def LDNPWi : LoadPairNoAlloc<0b00, 0, GPR32, am_indexed32simm7, "ldnp">;
1090 def LDNPXi : LoadPairNoAlloc<0b10, 0, GPR64, am_indexed64simm7, "ldnp">;
1091 def LDNPSi : LoadPairNoAlloc<0b00, 1, FPR32, am_indexed32simm7, "ldnp">;
1092 def LDNPDi : LoadPairNoAlloc<0b01, 1, FPR64, am_indexed64simm7, "ldnp">;
1093 def LDNPQi : LoadPairNoAlloc<0b10, 1, FPR128, am_indexed128simm7, "ldnp">;
1096 // (register offset)
1099 let AddedComplexity = 10 in {
1101 def LDRBBro : Load8RO<0b00, 0, 0b01, GPR32, "ldrb",
1102 [(set GPR32:$Rt, (zextloadi8 ro_indexed8:$addr))]>;
1103 def LDRHHro : Load16RO<0b01, 0, 0b01, GPR32, "ldrh",
1104 [(set GPR32:$Rt, (zextloadi16 ro_indexed16:$addr))]>;
1105 def LDRWro : Load32RO<0b10, 0, 0b01, GPR32, "ldr",
1106 [(set GPR32:$Rt, (load ro_indexed32:$addr))]>;
1107 def LDRXro : Load64RO<0b11, 0, 0b01, GPR64, "ldr",
1108 [(set GPR64:$Rt, (load ro_indexed64:$addr))]>;
1111 def LDRBro : Load8RO<0b00, 1, 0b01, FPR8, "ldr",
1112 [(set FPR8:$Rt, (load ro_indexed8:$addr))]>;
1113 def LDRHro : Load16RO<0b01, 1, 0b01, FPR16, "ldr",
1114 [(set (f16 FPR16:$Rt), (load ro_indexed16:$addr))]>;
1115 def LDRSro : Load32RO<0b10, 1, 0b01, FPR32, "ldr",
1116 [(set (f32 FPR32:$Rt), (load ro_indexed32:$addr))]>;
1117 def LDRDro : Load64RO<0b11, 1, 0b01, FPR64, "ldr",
1118 [(set (f64 FPR64:$Rt), (load ro_indexed64:$addr))]>;
1119 def LDRQro : Load128RO<0b00, 1, 0b11, FPR128, "ldr", []> {
1123 // For regular loads, we do not have any alignment requirement.
1124 // Thus, it is safe to directly map the vector loads with interesting
1125 // addressing modes.
1126 // FIXME: We could do the same for bitconvert to floating point vectors.
1127 def : Pat <(v8i8 (scalar_to_vector (i32 (extloadi8 ro_indexed8:$addr)))),
1128 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
1129 (LDRBro ro_indexed8:$addr), bsub)>;
1130 def : Pat <(v16i8 (scalar_to_vector (i32 (extloadi8 ro_indexed8:$addr)))),
1131 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
1132 (LDRBro ro_indexed8:$addr), bsub)>;
1133 def : Pat <(v4i16 (scalar_to_vector (i32 (extloadi16 ro_indexed16:$addr)))),
1134 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
1135 (LDRHro ro_indexed16:$addr), hsub)>;
1136 def : Pat <(v8i16 (scalar_to_vector (i32 (extloadi16 ro_indexed16:$addr)))),
1137 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
1138 (LDRHro ro_indexed16:$addr), hsub)>;
1139 def : Pat <(v2i32 (scalar_to_vector (i32 (load ro_indexed32:$addr)))),
1140 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
1141 (LDRSro ro_indexed32:$addr), ssub)>;
1142 def : Pat <(v4i32 (scalar_to_vector (i32 (load ro_indexed32:$addr)))),
1143 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
1144 (LDRSro ro_indexed32:$addr), ssub)>;
1145 def : Pat <(v1i64 (scalar_to_vector (i64 (load ro_indexed64:$addr)))),
1146 (LDRDro ro_indexed64:$addr)>;
1147 def : Pat <(v2i64 (scalar_to_vector (i64 (load ro_indexed64:$addr)))),
1148 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
1149 (LDRDro ro_indexed64:$addr), dsub)>;
1151 // Match all 64-bit-wide loads whose type is compatible with FPR64
1152 let Predicates = [IsLE] in {
1153 // We must do vector loads with LD1 in big-endian.
1154 def : Pat<(v2f32 (load ro_indexed64:$addr)), (LDRDro ro_indexed64:$addr)>;
1155 def : Pat<(v8i8 (load ro_indexed64:$addr)), (LDRDro ro_indexed64:$addr)>;
1156 def : Pat<(v4i16 (load ro_indexed64:$addr)), (LDRDro ro_indexed64:$addr)>;
1157 def : Pat<(v2i32 (load ro_indexed64:$addr)), (LDRDro ro_indexed64:$addr)>;
1159 def : Pat<(v1f64 (load ro_indexed64:$addr)), (LDRDro ro_indexed64:$addr)>;
1160 def : Pat<(v1i64 (load ro_indexed64:$addr)), (LDRDro ro_indexed64:$addr)>;
1162 // Match all 128-bit-wide loads whose type is compatible with FPR128
1163 let Predicates = [IsLE] in {
1164 // We must do vector loads with LD1 in big-endian.
1165 def : Pat<(v4f32 (load ro_indexed128:$addr)), (LDRQro ro_indexed128:$addr)>;
1166 def : Pat<(v2f64 (load ro_indexed128:$addr)), (LDRQro ro_indexed128:$addr)>;
1167 def : Pat<(v16i8 (load ro_indexed128:$addr)), (LDRQro ro_indexed128:$addr)>;
1168 def : Pat<(v8i16 (load ro_indexed128:$addr)), (LDRQro ro_indexed128:$addr)>;
1169 def : Pat<(v4i32 (load ro_indexed128:$addr)), (LDRQro ro_indexed128:$addr)>;
1170 def : Pat<(v2i64 (load ro_indexed128:$addr)), (LDRQro ro_indexed128:$addr)>;
1172 def : Pat<(f128 (load ro_indexed128:$addr)), (LDRQro ro_indexed128:$addr)>;
1174 // Load sign-extended half-word
1175 def LDRSHWro : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh",
1176 [(set GPR32:$Rt, (sextloadi16 ro_indexed16:$addr))]>;
1177 def LDRSHXro : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh",
1178 [(set GPR64:$Rt, (sextloadi16 ro_indexed16:$addr))]>;
1180 // Load sign-extended byte
1181 def LDRSBWro : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb",
1182 [(set GPR32:$Rt, (sextloadi8 ro_indexed8:$addr))]>;
1183 def LDRSBXro : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb",
1184 [(set GPR64:$Rt, (sextloadi8 ro_indexed8:$addr))]>;
1186 // Load sign-extended word
1187 def LDRSWro : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw",
1188 [(set GPR64:$Rt, (sextloadi32 ro_indexed32:$addr))]>;
1191 def PRFMro : PrefetchRO<0b11, 0, 0b10, "prfm",
1192 [(ARM64Prefetch imm:$Rt, ro_indexed64:$addr)]>;
1195 def : Pat<(i64 (zextloadi8 ro_indexed8:$addr)),
1196 (SUBREG_TO_REG (i64 0), (LDRBBro ro_indexed8:$addr), sub_32)>;
1197 def : Pat<(i64 (zextloadi16 ro_indexed16:$addr)),
1198 (SUBREG_TO_REG (i64 0), (LDRHHro ro_indexed16:$addr), sub_32)>;
1199 def : Pat<(i64 (zextloadi32 ro_indexed32:$addr)),
1200 (SUBREG_TO_REG (i64 0), (LDRWro ro_indexed32:$addr), sub_32)>;
1202 // zextloadi1 -> zextloadi8
1203 def : Pat<(i32 (zextloadi1 ro_indexed8:$addr)), (LDRBBro ro_indexed8:$addr)>;
1204 def : Pat<(i64 (zextloadi1 ro_indexed8:$addr)),
1205 (SUBREG_TO_REG (i64 0), (LDRBBro ro_indexed8:$addr), sub_32)>;
1207 // extload -> zextload
1208 def : Pat<(i32 (extloadi16 ro_indexed16:$addr)), (LDRHHro ro_indexed16:$addr)>;
1209 def : Pat<(i32 (extloadi8 ro_indexed8:$addr)), (LDRBBro ro_indexed8:$addr)>;
1210 def : Pat<(i32 (extloadi1 ro_indexed8:$addr)), (LDRBBro ro_indexed8:$addr)>;
1211 def : Pat<(i64 (extloadi32 ro_indexed32:$addr)),
1212 (SUBREG_TO_REG (i64 0), (LDRWro ro_indexed32:$addr), sub_32)>;
1213 def : Pat<(i64 (extloadi16 ro_indexed16:$addr)),
1214 (SUBREG_TO_REG (i64 0), (LDRHHro ro_indexed16:$addr), sub_32)>;
1215 def : Pat<(i64 (extloadi8 ro_indexed8:$addr)),
1216 (SUBREG_TO_REG (i64 0), (LDRBBro ro_indexed8:$addr), sub_32)>;
1217 def : Pat<(i64 (extloadi1 ro_indexed8:$addr)),
1218 (SUBREG_TO_REG (i64 0), (LDRBBro ro_indexed8:$addr), sub_32)>;
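// An any-extending load leaves the high bits unspecified, so it is always safe
// (and free) to use the zero-extending load forms for it, as the patterns above do.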
1220 } // AddedComplexity = 10
1223 // (unsigned immediate)
1225 def LDRXui : LoadUI<0b11, 0, 0b01, GPR64, am_indexed64, "ldr",
1226 [(set GPR64:$Rt, (load am_indexed64:$addr))]>;
1227 def LDRWui : LoadUI<0b10, 0, 0b01, GPR32, am_indexed32, "ldr",
1228 [(set GPR32:$Rt, (load am_indexed32:$addr))]>;
1229 def LDRBui : LoadUI<0b00, 1, 0b01, FPR8, am_indexed8, "ldr",
1230 [(set FPR8:$Rt, (load am_indexed8:$addr))]>;
1231 def LDRHui : LoadUI<0b01, 1, 0b01, FPR16, am_indexed16, "ldr",
1232 [(set (f16 FPR16:$Rt), (load am_indexed16:$addr))]>;
1233 def LDRSui : LoadUI<0b10, 1, 0b01, FPR32, am_indexed32, "ldr",
1234 [(set (f32 FPR32:$Rt), (load am_indexed32:$addr))]>;
1235 def LDRDui : LoadUI<0b11, 1, 0b01, FPR64, am_indexed64, "ldr",
1236 [(set (f64 FPR64:$Rt), (load am_indexed64:$addr))]>;
1237 def LDRQui : LoadUI<0b00, 1, 0b11, FPR128, am_indexed128, "ldr",
1238 [(set (f128 FPR128:$Rt), (load am_indexed128:$addr))]>;
1240 // For regular loads, we do not have any alignment requirement.
1241 // Thus, it is safe to directly map the vector loads with interesting
1242 // addressing modes.
1243 // FIXME: We could do the same for bitconvert to floating point vectors.
1244 def : Pat <(v8i8 (scalar_to_vector (i32 (extloadi8 am_indexed8:$addr)))),
1245 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
1246 (LDRBui am_indexed8:$addr), bsub)>;
1247 def : Pat <(v16i8 (scalar_to_vector (i32 (extloadi8 am_indexed8:$addr)))),
1248 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
1249 (LDRBui am_indexed8:$addr), bsub)>;
1250 def : Pat <(v4i16 (scalar_to_vector (i32 (extloadi16 am_indexed16:$addr)))),
1251 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
1252 (LDRHui am_indexed16:$addr), hsub)>;
1253 def : Pat <(v8i16 (scalar_to_vector (i32 (extloadi16 am_indexed16:$addr)))),
1254 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
1255 (LDRHui am_indexed16:$addr), hsub)>;
1256 def : Pat <(v2i32 (scalar_to_vector (i32 (load am_indexed32:$addr)))),
1257 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
1258 (LDRSui am_indexed32:$addr), ssub)>;
1259 def : Pat <(v4i32 (scalar_to_vector (i32 (load am_indexed32:$addr)))),
1260 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
1261 (LDRSui am_indexed32:$addr), ssub)>;
1262 def : Pat <(v1i64 (scalar_to_vector (i64 (load am_indexed64:$addr)))),
1263 (LDRDui am_indexed64:$addr)>;
1264 def : Pat <(v2i64 (scalar_to_vector (i64 (load am_indexed64:$addr)))),
1265 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
1266 (LDRDui am_indexed64:$addr), dsub)>;
1268 // Match all 64-bit-wide loads whose type is compatible with FPR64
1269 let Predicates = [IsLE] in {
1270 // We must use LD1 to perform vector loads in big-endian.
1271 def : Pat<(v2f32 (load am_indexed64:$addr)), (LDRDui am_indexed64:$addr)>;
1272 def : Pat<(v8i8 (load am_indexed64:$addr)), (LDRDui am_indexed64:$addr)>;
1273 def : Pat<(v4i16 (load am_indexed64:$addr)), (LDRDui am_indexed64:$addr)>;
1274 def : Pat<(v2i32 (load am_indexed64:$addr)), (LDRDui am_indexed64:$addr)>;
1276 def : Pat<(v1f64 (load am_indexed64:$addr)), (LDRDui am_indexed64:$addr)>;
1277 def : Pat<(v1i64 (load am_indexed64:$addr)), (LDRDui am_indexed64:$addr)>;
1279 // Match all 128-bit-wide loads whose type is compatible with FPR128
1280 let Predicates = [IsLE] in {
1281 // We must use LD1 to perform vector loads in big-endian.
1282 def : Pat<(v4f32 (load am_indexed128:$addr)), (LDRQui am_indexed128:$addr)>;
1283 def : Pat<(v2f64 (load am_indexed128:$addr)), (LDRQui am_indexed128:$addr)>;
1284 def : Pat<(v16i8 (load am_indexed128:$addr)), (LDRQui am_indexed128:$addr)>;
1285 def : Pat<(v8i16 (load am_indexed128:$addr)), (LDRQui am_indexed128:$addr)>;
1286 def : Pat<(v4i32 (load am_indexed128:$addr)), (LDRQui am_indexed128:$addr)>;
1287 def : Pat<(v2i64 (load am_indexed128:$addr)), (LDRQui am_indexed128:$addr)>;
1289 def : Pat<(f128 (load am_indexed128:$addr)), (LDRQui am_indexed128:$addr)>;
1291 def LDRHHui : LoadUI<0b01, 0, 0b01, GPR32, am_indexed16, "ldrh",
1292 [(set GPR32:$Rt, (zextloadi16 am_indexed16:$addr))]>;
1293 def LDRBBui : LoadUI<0b00, 0, 0b01, GPR32, am_indexed8, "ldrb",
1294 [(set GPR32:$Rt, (zextloadi8 am_indexed8:$addr))]>;
1296 def : Pat<(i64 (zextloadi8 am_indexed8:$addr)),
1297 (SUBREG_TO_REG (i64 0), (LDRBBui am_indexed8:$addr), sub_32)>;
1298 def : Pat<(i64 (zextloadi16 am_indexed16:$addr)),
1299 (SUBREG_TO_REG (i64 0), (LDRHHui am_indexed16:$addr), sub_32)>;
1301 // zextloadi1 -> zextloadi8
1302 def : Pat<(i32 (zextloadi1 am_indexed8:$addr)), (LDRBBui am_indexed8:$addr)>;
1303 def : Pat<(i64 (zextloadi1 am_indexed8:$addr)),
1304 (SUBREG_TO_REG (i64 0), (LDRBBui am_indexed8:$addr), sub_32)>;
1306 // extload -> zextload
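// Any-extending loads are selected as zero-extending loads: the extra bits are
// undefined anyway, and a 32-bit load already zeroes the upper 32 bits of the
// X register, which SUBREG_TO_REG then reinterprets as an i64.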
1307 def : Pat<(i32 (extloadi16 am_indexed16:$addr)), (LDRHHui am_indexed16:$addr)>;
1308 def : Pat<(i32 (extloadi8 am_indexed8:$addr)), (LDRBBui am_indexed8:$addr)>;
1309 def : Pat<(i32 (extloadi1 am_indexed8:$addr)), (LDRBBui am_indexed8:$addr)>;
1310 def : Pat<(i64 (extloadi32 am_indexed32:$addr)),
1311 (SUBREG_TO_REG (i64 0), (LDRWui am_indexed32:$addr), sub_32)>;
1312 def : Pat<(i64 (extloadi16 am_indexed16:$addr)),
1313 (SUBREG_TO_REG (i64 0), (LDRHHui am_indexed16:$addr), sub_32)>;
1314 def : Pat<(i64 (extloadi8 am_indexed8:$addr)),
1315 (SUBREG_TO_REG (i64 0), (LDRBBui am_indexed8:$addr), sub_32)>;
1316 def : Pat<(i64 (extloadi1 am_indexed8:$addr)),
1317 (SUBREG_TO_REG (i64 0), (LDRBBui am_indexed8:$addr), sub_32)>;
1319 // load sign-extended half-word
1320 def LDRSHWui : LoadUI<0b01, 0, 0b11, GPR32, am_indexed16, "ldrsh",
1321 [(set GPR32:$Rt, (sextloadi16 am_indexed16:$addr))]>;
1322 def LDRSHXui : LoadUI<0b01, 0, 0b10, GPR64, am_indexed16, "ldrsh",
1323 [(set GPR64:$Rt, (sextloadi16 am_indexed16:$addr))]>;
1325 // load sign-extended byte
1326 def LDRSBWui : LoadUI<0b00, 0, 0b11, GPR32, am_indexed8, "ldrsb",
1327 [(set GPR32:$Rt, (sextloadi8 am_indexed8:$addr))]>;
1328 def LDRSBXui : LoadUI<0b00, 0, 0b10, GPR64, am_indexed8, "ldrsb",
1329 [(set GPR64:$Rt, (sextloadi8 am_indexed8:$addr))]>;
1331 // load sign-extended word
1332 def LDRSWui : LoadUI<0b10, 0, 0b10, GPR64, am_indexed32, "ldrsw",
1333 [(set GPR64:$Rt, (sextloadi32 am_indexed32:$addr))]>;
1335 // load zero-extended word
1336 def : Pat<(i64 (zextloadi32 am_indexed32:$addr)),
1337 (SUBREG_TO_REG (i64 0), (LDRWui am_indexed32:$addr), sub_32)>;
1340 def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
1341 [(ARM64Prefetch imm:$Rt, am_indexed64:$addr)]>;
1344 // (literal)
1345 def LDRWl : LoadLiteral<0b00, 0, GPR32, "ldr">;
1346 def LDRXl : LoadLiteral<0b01, 0, GPR64, "ldr">;
1347 def LDRSl : LoadLiteral<0b00, 1, FPR32, "ldr">;
1348 def LDRDl : LoadLiteral<0b01, 1, FPR64, "ldr">;
1349 def LDRQl : LoadLiteral<0b10, 1, FPR128, "ldr">;
1351 // load sign-extended word
1352 def LDRSWl : LoadLiteral<0b10, 0, GPR64, "ldrsw">;
1355 def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
1356 // [(ARM64Prefetch imm:$Rt, tglobaladdr:$label)]>;
1359 // (unscaled immediate)
1360 def LDURXi : LoadUnscaled<0b11, 0, 0b01, GPR64, am_unscaled64, "ldur",
1361 [(set GPR64:$Rt, (load am_unscaled64:$addr))]>;
1362 def LDURWi : LoadUnscaled<0b10, 0, 0b01, GPR32, am_unscaled32, "ldur",
1363 [(set GPR32:$Rt, (load am_unscaled32:$addr))]>;
1364 def LDURBi : LoadUnscaled<0b00, 1, 0b01, FPR8, am_unscaled8, "ldur",
1365 [(set FPR8:$Rt, (load am_unscaled8:$addr))]>;
1366 def LDURHi : LoadUnscaled<0b01, 1, 0b01, FPR16, am_unscaled16, "ldur",
1367 [(set (f16 FPR16:$Rt), (load am_unscaled16:$addr))]>;
1368 def LDURSi : LoadUnscaled<0b10, 1, 0b01, FPR32, am_unscaled32, "ldur",
1369 [(set (f32 FPR32:$Rt), (load am_unscaled32:$addr))]>;
1370 def LDURDi : LoadUnscaled<0b11, 1, 0b01, FPR64, am_unscaled64, "ldur",
1371 [(set (f64 FPR64:$Rt), (load am_unscaled64:$addr))]>;
1372 def LDURQi : LoadUnscaled<0b00, 1, 0b11, FPR128, am_unscaled128, "ldur",
1373 [(set (f128 FPR128:$Rt), (load am_unscaled128:$addr))]>;
1375 def LDURHHi
1376 : LoadUnscaled<0b01, 0, 0b01, GPR32, am_unscaled16, "ldurh",
1377 [(set GPR32:$Rt, (zextloadi16 am_unscaled16:$addr))]>;
1378 def LDURBBi
1379 : LoadUnscaled<0b00, 0, 0b01, GPR32, am_unscaled8, "ldurb",
1380 [(set GPR32:$Rt, (zextloadi8 am_unscaled8:$addr))]>;
1382 // Match all 64-bit-wide loads whose type is compatible with FPR64
1383 let Predicates = [IsLE] in {
1384 def : Pat<(v2f32 (load am_unscaled64:$addr)), (LDURDi am_unscaled64:$addr)>;
1385 def : Pat<(v8i8 (load am_unscaled64:$addr)), (LDURDi am_unscaled64:$addr)>;
1386 def : Pat<(v4i16 (load am_unscaled64:$addr)), (LDURDi am_unscaled64:$addr)>;
1387 def : Pat<(v2i32 (load am_unscaled64:$addr)), (LDURDi am_unscaled64:$addr)>;
1388 }
1389 def : Pat<(v1f64 (load am_unscaled64:$addr)), (LDURDi am_unscaled64:$addr)>;
1390 def : Pat<(v1i64 (load am_unscaled64:$addr)), (LDURDi am_unscaled64:$addr)>;
1392 // Match all 128-bit-wide loads whose type is compatible with FPR128
1393 let Predicates = [IsLE] in {
1394 def : Pat<(v4f32 (load am_unscaled128:$addr)), (LDURQi am_unscaled128:$addr)>;
1395 def : Pat<(v2f64 (load am_unscaled128:$addr)), (LDURQi am_unscaled128:$addr)>;
1396 def : Pat<(v16i8 (load am_unscaled128:$addr)), (LDURQi am_unscaled128:$addr)>;
1397 def : Pat<(v8i16 (load am_unscaled128:$addr)), (LDURQi am_unscaled128:$addr)>;
1398 def : Pat<(v4i32 (load am_unscaled128:$addr)), (LDURQi am_unscaled128:$addr)>;
1399 def : Pat<(v2i64 (load am_unscaled128:$addr)), (LDURQi am_unscaled128:$addr)>;
1400 def : Pat<(v2f64 (load am_unscaled128:$addr)), (LDURQi am_unscaled128:$addr)>;
1401 }
1404 def : Pat<(i32 (extloadi16 am_unscaled16:$addr)), (LDURHHi am_unscaled16:$addr)>;
1405 def : Pat<(i32 (extloadi8 am_unscaled8:$addr)), (LDURBBi am_unscaled8:$addr)>;
1406 def : Pat<(i32 (extloadi1 am_unscaled8:$addr)), (LDURBBi am_unscaled8:$addr)>;
1407 def : Pat<(i64 (extloadi32 am_unscaled32:$addr)),
1408 (SUBREG_TO_REG (i64 0), (LDURWi am_unscaled32:$addr), sub_32)>;
1409 def : Pat<(i64 (extloadi16 am_unscaled16:$addr)),
1410 (SUBREG_TO_REG (i64 0), (LDURHHi am_unscaled16:$addr), sub_32)>;
1411 def : Pat<(i64 (extloadi8 am_unscaled8:$addr)),
1412 (SUBREG_TO_REG (i64 0), (LDURBBi am_unscaled8:$addr), sub_32)>;
1413 def : Pat<(i64 (extloadi1 am_unscaled8:$addr)),
1414 (SUBREG_TO_REG (i64 0), (LDURBBi am_unscaled8:$addr), sub_32)>;
1416 def : Pat<(i32 (zextloadi16 am_unscaled16:$addr)),
1417 (LDURHHi am_unscaled16:$addr)>;
1418 def : Pat<(i32 (zextloadi8 am_unscaled8:$addr)),
1419 (LDURBBi am_unscaled8:$addr)>;
1420 def : Pat<(i32 (zextloadi1 am_unscaled8:$addr)),
1421 (LDURBBi am_unscaled8:$addr)>;
1422 def : Pat<(i64 (zextloadi32 am_unscaled32:$addr)),
1423 (SUBREG_TO_REG (i64 0), (LDURWi am_unscaled32:$addr), sub_32)>;
1424 def : Pat<(i64 (zextloadi16 am_unscaled16:$addr)),
1425 (SUBREG_TO_REG (i64 0), (LDURHHi am_unscaled16:$addr), sub_32)>;
1426 def : Pat<(i64 (zextloadi8 am_unscaled8:$addr)),
1427 (SUBREG_TO_REG (i64 0), (LDURBBi am_unscaled8:$addr), sub_32)>;
1428 def : Pat<(i64 (zextloadi1 am_unscaled8:$addr)),
1429 (SUBREG_TO_REG (i64 0), (LDURBBi am_unscaled8:$addr), sub_32)>;
1433 // LDR mnemonics fall back to LDUR for negative or unaligned offsets.
1435 // Define new assembler match classes, as we only want these to match when
1436 // they don't otherwise match the scaled addressing mode for LDR/STR. Don't
1437 // associate a DiagnosticType either, as we want the diagnostic for the
1438 // canonical form (the scaled operand) to take precedence.
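// For example, "ldr x0, [x1, #3]" cannot use the scaled 64-bit LDR encoding
// (the offset is not a multiple of 8), so it matches the LDUR form through
// these fallback operands instead.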
1439 def MemoryUnscaledFB8Operand : AsmOperandClass {
1440 let Name = "MemoryUnscaledFB8";
1441 let RenderMethod = "addMemoryUnscaledOperands";
1442 }
1443 def MemoryUnscaledFB16Operand : AsmOperandClass {
1444 let Name = "MemoryUnscaledFB16";
1445 let RenderMethod = "addMemoryUnscaledOperands";
1446 }
1447 def MemoryUnscaledFB32Operand : AsmOperandClass {
1448 let Name = "MemoryUnscaledFB32";
1449 let RenderMethod = "addMemoryUnscaledOperands";
1450 }
1451 def MemoryUnscaledFB64Operand : AsmOperandClass {
1452 let Name = "MemoryUnscaledFB64";
1453 let RenderMethod = "addMemoryUnscaledOperands";
1454 }
1455 def MemoryUnscaledFB128Operand : AsmOperandClass {
1456 let Name = "MemoryUnscaledFB128";
1457 let RenderMethod = "addMemoryUnscaledOperands";
1458 }
1459 def am_unscaled_fb8 : Operand<i64> {
1460 let ParserMatchClass = MemoryUnscaledFB8Operand;
1461 let MIOperandInfo = (ops GPR64sp:$base, i64imm:$offset);
1462 }
1463 def am_unscaled_fb16 : Operand<i64> {
1464 let ParserMatchClass = MemoryUnscaledFB16Operand;
1465 let MIOperandInfo = (ops GPR64sp:$base, i64imm:$offset);
1466 }
1467 def am_unscaled_fb32 : Operand<i64> {
1468 let ParserMatchClass = MemoryUnscaledFB32Operand;
1469 let MIOperandInfo = (ops GPR64sp:$base, i64imm:$offset);
1470 }
1471 def am_unscaled_fb64 : Operand<i64> {
1472 let ParserMatchClass = MemoryUnscaledFB64Operand;
1473 let MIOperandInfo = (ops GPR64sp:$base, i64imm:$offset);
1474 }
1475 def am_unscaled_fb128 : Operand<i64> {
1476 let ParserMatchClass = MemoryUnscaledFB128Operand;
1477 let MIOperandInfo = (ops GPR64sp:$base, i64imm:$offset);
1478 }
1479 def : InstAlias<"ldr $Rt, $addr", (LDURXi GPR64:$Rt, am_unscaled_fb64:$addr)>;
1480 def : InstAlias<"ldr $Rt, $addr", (LDURWi GPR32:$Rt, am_unscaled_fb32:$addr)>;
1481 def : InstAlias<"ldr $Rt, $addr", (LDURBi FPR8:$Rt, am_unscaled_fb8:$addr)>;
1482 def : InstAlias<"ldr $Rt, $addr", (LDURHi FPR16:$Rt, am_unscaled_fb16:$addr)>;
1483 def : InstAlias<"ldr $Rt, $addr", (LDURSi FPR32:$Rt, am_unscaled_fb32:$addr)>;
1484 def : InstAlias<"ldr $Rt, $addr", (LDURDi FPR64:$Rt, am_unscaled_fb64:$addr)>;
1485 def : InstAlias<"ldr $Rt, $addr", (LDURQi FPR128:$Rt, am_unscaled_fb128:$addr)>;
1488 def : Pat<(i64 (zextloadi8 am_unscaled8:$addr)),
1489 (SUBREG_TO_REG (i64 0), (LDURBBi am_unscaled8:$addr), sub_32)>;
1490 def : Pat<(i64 (zextloadi16 am_unscaled16:$addr)),
1491 (SUBREG_TO_REG (i64 0), (LDURHHi am_unscaled16:$addr), sub_32)>;
1493 // load sign-extended half-word
1495 : LoadUnscaled<0b01, 0, 0b11, GPR32, am_unscaled16, "ldursh",
1496 [(set GPR32:$Rt, (sextloadi16 am_unscaled16:$addr))]>;
1498 : LoadUnscaled<0b01, 0, 0b10, GPR64, am_unscaled16, "ldursh",
1499 [(set GPR64:$Rt, (sextloadi16 am_unscaled16:$addr))]>;
1501 // load sign-extended byte
1503 : LoadUnscaled<0b00, 0, 0b11, GPR32, am_unscaled8, "ldursb",
1504 [(set GPR32:$Rt, (sextloadi8 am_unscaled8:$addr))]>;
1506 : LoadUnscaled<0b00, 0, 0b10, GPR64, am_unscaled8, "ldursb",
1507 [(set GPR64:$Rt, (sextloadi8 am_unscaled8:$addr))]>;
1509 // load sign-extended word
1511 : LoadUnscaled<0b10, 0, 0b10, GPR64, am_unscaled32, "ldursw",
1512 [(set GPR64:$Rt, (sextloadi32 am_unscaled32:$addr))]>;
1514 // zero and sign extending aliases from generic LDR* mnemonics to LDUR*.
1515 def : InstAlias<"ldrb $Rt, $addr", (LDURBBi GPR32:$Rt, am_unscaled_fb8:$addr)>;
1516 def : InstAlias<"ldrh $Rt, $addr", (LDURHHi GPR32:$Rt, am_unscaled_fb16:$addr)>;
1517 def : InstAlias<"ldrsb $Rt, $addr", (LDURSBWi GPR32:$Rt, am_unscaled_fb8:$addr)>;
1518 def : InstAlias<"ldrsb $Rt, $addr", (LDURSBXi GPR64:$Rt, am_unscaled_fb8:$addr)>;
1519 def : InstAlias<"ldrsh $Rt, $addr", (LDURSHWi GPR32:$Rt, am_unscaled_fb16:$addr)>;
1520 def : InstAlias<"ldrsh $Rt, $addr", (LDURSHXi GPR64:$Rt, am_unscaled_fb16:$addr)>;
1521 def : InstAlias<"ldrsw $Rt, $addr", (LDURSWi GPR64:$Rt, am_unscaled_fb32:$addr)>;
1524 def PRFUMi : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
1525 [(ARM64Prefetch imm:$Rt, am_unscaled64:$addr)]>;
1528 // (unscaled immediate, unprivileged)
1529 def LDTRXi : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
1530 def LDTRWi : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
1532 def LDTRHi : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
1533 def LDTRBi : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
1535 // load sign-extended half-word
1536 def LDTRSHWi : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
1537 def LDTRSHXi : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
1539 // load sign-extended byte
1540 def LDTRSBWi : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
1541 def LDTRSBXi : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
1543 // load sign-extended word
1544 def LDTRSWi : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
1547 // (immediate pre-indexed)
1548 def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32, "ldr">;
1549 def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64, "ldr">;
1550 def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8, "ldr">;
1551 def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16, "ldr">;
1552 def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32, "ldr">;
1553 def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64, "ldr">;
1554 def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128, "ldr">;
1556 // load sign-extended half-word
1557 def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
1558 def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
1560 // load sign-extended byte
1561 def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
1562 def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
1564 // load zero-extended byte
1565 def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32, "ldrb">;
1566 def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32, "ldrh">;
1568 // load sign-extended word
1569 def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
1571 // ISel pseudos and patterns. See expanded comment on LoadPreIdxPseudo.
1572 def LDRQpre_isel : LoadPreIdxPseudo<FPR128>;
1573 def LDRDpre_isel : LoadPreIdxPseudo<FPR64>;
1574 def LDRSpre_isel : LoadPreIdxPseudo<FPR32>;
1575 def LDRXpre_isel : LoadPreIdxPseudo<GPR64>;
1576 def LDRWpre_isel : LoadPreIdxPseudo<GPR32>;
1577 def LDRHHpre_isel : LoadPreIdxPseudo<GPR32>;
1578 def LDRBBpre_isel : LoadPreIdxPseudo<GPR32>;
1580 def LDRSWpre_isel : LoadPreIdxPseudo<GPR64>;
1581 def LDRSHWpre_isel : LoadPreIdxPseudo<GPR32>;
1582 def LDRSHXpre_isel : LoadPreIdxPseudo<GPR64>;
1583 def LDRSBWpre_isel : LoadPreIdxPseudo<GPR32>;
1584 def LDRSBXpre_isel : LoadPreIdxPseudo<GPR64>;
1587 // (immediate post-indexed)
1588 def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32, "ldr">;
1589 def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64, "ldr">;
1590 def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8, "ldr">;
1591 def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16, "ldr">;
1592 def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32, "ldr">;
1593 def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64, "ldr">;
1594 def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128, "ldr">;
1596 // load sign-extended half-word
1597 def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
1598 def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
1600 // load sign-extended byte
1601 def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
1602 def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
1604 // load zero-extended byte
1605 def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32, "ldrb">;
1606 def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32, "ldrh">;
1608 // load sign-extended word
1609 def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
1611 // ISel pseudos and patterns. See expanded comment on LoadPostIdxPseudo.
1612 def LDRQpost_isel : LoadPostIdxPseudo<FPR128>;
1613 def LDRDpost_isel : LoadPostIdxPseudo<FPR64>;
1614 def LDRSpost_isel : LoadPostIdxPseudo<FPR32>;
1615 def LDRXpost_isel : LoadPostIdxPseudo<GPR64>;
1616 def LDRWpost_isel : LoadPostIdxPseudo<GPR32>;
1617 def LDRHHpost_isel : LoadPostIdxPseudo<GPR32>;
1618 def LDRBBpost_isel : LoadPostIdxPseudo<GPR32>;
1620 def LDRSWpost_isel : LoadPostIdxPseudo<GPR64>;
1621 def LDRSHWpost_isel : LoadPostIdxPseudo<GPR32>;
1622 def LDRSHXpost_isel : LoadPostIdxPseudo<GPR64>;
1623 def LDRSBWpost_isel : LoadPostIdxPseudo<GPR32>;
1624 def LDRSBXpost_isel : LoadPostIdxPseudo<GPR64>;
1626 //===----------------------------------------------------------------------===//
1627 // Store instructions.
1628 //===----------------------------------------------------------------------===//
1630 // Pair (indexed, offset)
1631 // FIXME: Use dedicated range-checked addressing mode operand here.
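// The pair instructions take a signed 7-bit immediate scaled by the access
// size (4, 8 or 16 bytes).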
1632 def STPWi : StorePairOffset<0b00, 0, GPR32, am_indexed32simm7, "stp">;
1633 def STPXi : StorePairOffset<0b10, 0, GPR64, am_indexed64simm7, "stp">;
1634 def STPSi : StorePairOffset<0b00, 1, FPR32, am_indexed32simm7, "stp">;
1635 def STPDi : StorePairOffset<0b01, 1, FPR64, am_indexed64simm7, "stp">;
1636 def STPQi : StorePairOffset<0b10, 1, FPR128, am_indexed128simm7, "stp">;
1638 // Pair (pre-indexed)
1639 def STPWpre : StorePairPreIdx<0b00, 0, GPR32, am_indexed32simm7_wb, "stp">;
1640 def STPXpre : StorePairPreIdx<0b10, 0, GPR64, am_indexed64simm7_wb, "stp">;
1641 def STPSpre : StorePairPreIdx<0b00, 1, FPR32, am_indexed32simm7_wb, "stp">;
1642 def STPDpre : StorePairPreIdx<0b01, 1, FPR64, am_indexed64simm7_wb, "stp">;
1643 def STPQpre : StorePairPreIdx<0b10, 1, FPR128, am_indexed128simm7_wb, "stp">;
1645 // Pair (post-indexed)
1646 def STPWpost : StorePairPostIdx<0b00, 0, GPR32, simm7s4, "stp">;
1647 def STPXpost : StorePairPostIdx<0b10, 0, GPR64, simm7s8, "stp">;
1648 def STPSpost : StorePairPostIdx<0b00, 1, FPR32, simm7s4, "stp">;
1649 def STPDpost : StorePairPostIdx<0b01, 1, FPR64, simm7s8, "stp">;
1650 def STPQpost : StorePairPostIdx<0b10, 1, FPR128, simm7s16, "stp">;
1652 // Pair (no allocate)
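// STNP stores a pair with a non-temporal hint: the data is not expected to be
// reused soon, so caches need not retain it.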
1653 def STNPWi : StorePairNoAlloc<0b00, 0, GPR32, am_indexed32simm7, "stnp">;
1654 def STNPXi : StorePairNoAlloc<0b10, 0, GPR64, am_indexed64simm7, "stnp">;
1655 def STNPSi : StorePairNoAlloc<0b00, 1, FPR32, am_indexed32simm7, "stnp">;
1656 def STNPDi : StorePairNoAlloc<0b01, 1, FPR64, am_indexed64simm7, "stnp">;
1657 def STNPQi : StorePairNoAlloc<0b10, 1, FPR128, am_indexed128simm7, "stnp">;
1660 // (Register offset)
1662 let AddedComplexity = 10 in {
1665 def STRHHro : Store16RO<0b01, 0, 0b00, GPR32, "strh",
1666 [(truncstorei16 GPR32:$Rt, ro_indexed16:$addr)]>;
1667 def STRBBro : Store8RO<0b00, 0, 0b00, GPR32, "strb",
1668 [(truncstorei8 GPR32:$Rt, ro_indexed8:$addr)]>;
1669 def STRWro : Store32RO<0b10, 0, 0b00, GPR32, "str",
1670 [(store GPR32:$Rt, ro_indexed32:$addr)]>;
1671 def STRXro : Store64RO<0b11, 0, 0b00, GPR64, "str",
1672 [(store GPR64:$Rt, ro_indexed64:$addr)]>;
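// Truncating i64 stores only need the low 32 bits of the source, so they are
// selected as W-register stores of the sub_32 subregister.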
1675 def : Pat<(truncstorei8 GPR64:$Rt, ro_indexed8:$addr),
1676 (STRBBro (EXTRACT_SUBREG GPR64:$Rt, sub_32), ro_indexed8:$addr)>;
1677 def : Pat<(truncstorei16 GPR64:$Rt, ro_indexed16:$addr),
1678 (STRHHro (EXTRACT_SUBREG GPR64:$Rt, sub_32), ro_indexed16:$addr)>;
1679 def : Pat<(truncstorei32 GPR64:$Rt, ro_indexed32:$addr),
1680 (STRWro (EXTRACT_SUBREG GPR64:$Rt, sub_32), ro_indexed32:$addr)>;
1684 def STRBro : Store8RO<0b00, 1, 0b00, FPR8, "str",
1685 [(store FPR8:$Rt, ro_indexed8:$addr)]>;
1686 def STRHro : Store16RO<0b01, 1, 0b00, FPR16, "str",
1687 [(store (f16 FPR16:$Rt), ro_indexed16:$addr)]>;
1688 def STRSro : Store32RO<0b10, 1, 0b00, FPR32, "str",
1689 [(store (f32 FPR32:$Rt), ro_indexed32:$addr)]>;
1690 def STRDro : Store64RO<0b11, 1, 0b00, FPR64, "str",
1691 [(store (f64 FPR64:$Rt), ro_indexed64:$addr)]>;
1692 def STRQro : Store128RO<0b00, 1, 0b10, FPR128, "str", []> {
1693 let mayStore = 1;
1694 }
1696 // Match all 64-bit-wide stores whose type is compatible with FPR64
1697 let Predicates = [IsLE] in {
1698 // We must use ST1 to store vectors in big-endian.
1699 def : Pat<(store (v2f32 FPR64:$Rn), ro_indexed64:$addr),
1700 (STRDro FPR64:$Rn, ro_indexed64:$addr)>;
1701 def : Pat<(store (v8i8 FPR64:$Rn), ro_indexed64:$addr),
1702 (STRDro FPR64:$Rn, ro_indexed64:$addr)>;
1703 def : Pat<(store (v4i16 FPR64:$Rn), ro_indexed64:$addr),
1704 (STRDro FPR64:$Rn, ro_indexed64:$addr)>;
1705 def : Pat<(store (v2i32 FPR64:$Rn), ro_indexed64:$addr),
1706 (STRDro FPR64:$Rn, ro_indexed64:$addr)>;
1707 }
1708 def : Pat<(store (v1f64 FPR64:$Rn), ro_indexed64:$addr),
1709 (STRDro FPR64:$Rn, ro_indexed64:$addr)>;
1710 def : Pat<(store (v1i64 FPR64:$Rn), ro_indexed64:$addr),
1711 (STRDro FPR64:$Rn, ro_indexed64:$addr)>;
1713 // Match all 128-bit-wide stores whose type is compatible with FPR128
1714 let Predicates = [IsLE] in {
1715 // We must use ST1 to store vectors in big-endian.
1716 def : Pat<(store (v4f32 FPR128:$Rn), ro_indexed128:$addr),
1717 (STRQro FPR128:$Rn, ro_indexed128:$addr)>;
1718 def : Pat<(store (v2f64 FPR128:$Rn), ro_indexed128:$addr),
1719 (STRQro FPR128:$Rn, ro_indexed128:$addr)>;
1720 def : Pat<(store (v16i8 FPR128:$Rn), ro_indexed128:$addr),
1721 (STRQro FPR128:$Rn, ro_indexed128:$addr)>;
1722 def : Pat<(store (v8i16 FPR128:$Rn), ro_indexed128:$addr),
1723 (STRQro FPR128:$Rn, ro_indexed128:$addr)>;
1724 def : Pat<(store (v4i32 FPR128:$Rn), ro_indexed128:$addr),
1725 (STRQro FPR128:$Rn, ro_indexed128:$addr)>;
1726 def : Pat<(store (v2i64 FPR128:$Rn), ro_indexed128:$addr),
1727 (STRQro FPR128:$Rn, ro_indexed128:$addr)>;
1728 }
1729 def : Pat<(store (f128 FPR128:$Rn), ro_indexed128:$addr),
1730 (STRQro FPR128:$Rn, ro_indexed128:$addr)>;
1733 // (unsigned immediate)
1734 def STRXui : StoreUI<0b11, 0, 0b00, GPR64, am_indexed64, "str",
1735 [(store GPR64:$Rt, am_indexed64:$addr)]>;
1736 def STRWui : StoreUI<0b10, 0, 0b00, GPR32, am_indexed32, "str",
1737 [(store GPR32:$Rt, am_indexed32:$addr)]>;
1738 def STRBui : StoreUI<0b00, 1, 0b00, FPR8, am_indexed8, "str",
1739 [(store FPR8:$Rt, am_indexed8:$addr)]>;
1740 def STRHui : StoreUI<0b01, 1, 0b00, FPR16, am_indexed16, "str",
1741 [(store (f16 FPR16:$Rt), am_indexed16:$addr)]>;
1742 def STRSui : StoreUI<0b10, 1, 0b00, FPR32, am_indexed32, "str",
1743 [(store (f32 FPR32:$Rt), am_indexed32:$addr)]>;
1744 def STRDui : StoreUI<0b11, 1, 0b00, FPR64, am_indexed64, "str",
1745 [(store (f64 FPR64:$Rt), am_indexed64:$addr)]>;
1746 def STRQui : StoreUI<0b00, 1, 0b10, FPR128, am_indexed128, "str", []> {
1747 let mayStore = 1;
1748 }
1750 // Match all 64-bit-wide stores whose type is compatible with FPR64
1751 let Predicates = [IsLE] in {
1752 // We must use ST1 to store vectors in big-endian.
1753 def : Pat<(store (v2f32 FPR64:$Rn), am_indexed64:$addr),
1754 (STRDui FPR64:$Rn, am_indexed64:$addr)>;
1755 def : Pat<(store (v8i8 FPR64:$Rn), am_indexed64:$addr),
1756 (STRDui FPR64:$Rn, am_indexed64:$addr)>;
1757 def : Pat<(store (v4i16 FPR64:$Rn), am_indexed64:$addr),
1758 (STRDui FPR64:$Rn, am_indexed64:$addr)>;
1759 def : Pat<(store (v2i32 FPR64:$Rn), am_indexed64:$addr),
1760 (STRDui FPR64:$Rn, am_indexed64:$addr)>;
1761 }
1762 def : Pat<(store (v1f64 FPR64:$Rn), am_indexed64:$addr),
1763 (STRDui FPR64:$Rn, am_indexed64:$addr)>;
1764 def : Pat<(store (v1i64 FPR64:$Rn), am_indexed64:$addr),
1765 (STRDui FPR64:$Rn, am_indexed64:$addr)>;
1767 // Match all 128-bit-wide stores whose type is compatible with FPR128
1768 let Predicates = [IsLE] in {
1769 // We must use ST1 to store vectors in big-endian.
1770 def : Pat<(store (v4f32 FPR128:$Rn), am_indexed128:$addr),
1771 (STRQui FPR128:$Rn, am_indexed128:$addr)>;
1772 def : Pat<(store (v2f64 FPR128:$Rn), am_indexed128:$addr),
1773 (STRQui FPR128:$Rn, am_indexed128:$addr)>;
1774 def : Pat<(store (v16i8 FPR128:$Rn), am_indexed128:$addr),
1775 (STRQui FPR128:$Rn, am_indexed128:$addr)>;
1776 def : Pat<(store (v8i16 FPR128:$Rn), am_indexed128:$addr),
1777 (STRQui FPR128:$Rn, am_indexed128:$addr)>;
1778 def : Pat<(store (v4i32 FPR128:$Rn), am_indexed128:$addr),
1779 (STRQui FPR128:$Rn, am_indexed128:$addr)>;
1780 def : Pat<(store (v2i64 FPR128:$Rn), am_indexed128:$addr),
1781 (STRQui FPR128:$Rn, am_indexed128:$addr)>;
1782 }
1783 def : Pat<(store (f128 FPR128:$Rn), am_indexed128:$addr),
1784 (STRQui FPR128:$Rn, am_indexed128:$addr)>;
1786 def STRHHui : StoreUI<0b01, 0, 0b00, GPR32, am_indexed16, "strh",
1787 [(truncstorei16 GPR32:$Rt, am_indexed16:$addr)]>;
1788 def STRBBui : StoreUI<0b00, 0, 0b00, GPR32, am_indexed8, "strb",
1789 [(truncstorei8 GPR32:$Rt, am_indexed8:$addr)]>;
1792 def : Pat<(truncstorei32 GPR64:$Rt, am_indexed32:$addr),
1793 (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_indexed32:$addr)>;
1794 def : Pat<(truncstorei16 GPR64:$Rt, am_indexed16:$addr),
1795 (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_indexed16:$addr)>;
1796 def : Pat<(truncstorei8 GPR64:$Rt, am_indexed8:$addr),
1797 (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_indexed8:$addr)>;
1799 } // AddedComplexity = 10
1802 // (unscaled immediate)
1803 def STURXi : StoreUnscaled<0b11, 0, 0b00, GPR64, am_unscaled64, "stur",
1804 [(store GPR64:$Rt, am_unscaled64:$addr)]>;
1805 def STURWi : StoreUnscaled<0b10, 0, 0b00, GPR32, am_unscaled32, "stur",
1806 [(store GPR32:$Rt, am_unscaled32:$addr)]>;
1807 def STURBi : StoreUnscaled<0b00, 1, 0b00, FPR8, am_unscaled8, "stur",
1808 [(store FPR8:$Rt, am_unscaled8:$addr)]>;
1809 def STURHi : StoreUnscaled<0b01, 1, 0b00, FPR16, am_unscaled16, "stur",
1810 [(store (f16 FPR16:$Rt), am_unscaled16:$addr)]>;
1811 def STURSi : StoreUnscaled<0b10, 1, 0b00, FPR32, am_unscaled32, "stur",
1812 [(store (f32 FPR32:$Rt), am_unscaled32:$addr)]>;
1813 def STURDi : StoreUnscaled<0b11, 1, 0b00, FPR64, am_unscaled64, "stur",
1814 [(store (f64 FPR64:$Rt), am_unscaled64:$addr)]>;
1815 def STURQi : StoreUnscaled<0b00, 1, 0b10, FPR128, am_unscaled128, "stur",
1816 [(store (f128 FPR128:$Rt), am_unscaled128:$addr)]>;
1817 def STURHHi : StoreUnscaled<0b01, 0, 0b00, GPR32, am_unscaled16, "sturh",
1818 [(truncstorei16 GPR32:$Rt, am_unscaled16:$addr)]>;
1819 def STURBBi : StoreUnscaled<0b00, 0, 0b00, GPR32, am_unscaled8, "sturb",
1820 [(truncstorei8 GPR32:$Rt, am_unscaled8:$addr)]>;
1822 // Match all 64-bit-wide stores whose type is compatible with FPR64
1823 let Predicates = [IsLE] in {
1824 // We must use ST1 to store vectors in big-endian.
1825 def : Pat<(store (v2f32 FPR64:$Rn), am_unscaled64:$addr),
1826 (STURDi FPR64:$Rn, am_unscaled64:$addr)>;
1827 def : Pat<(store (v8i8 FPR64:$Rn), am_unscaled64:$addr),
1828 (STURDi FPR64:$Rn, am_unscaled64:$addr)>;
1829 def : Pat<(store (v4i16 FPR64:$Rn), am_unscaled64:$addr),
1830 (STURDi FPR64:$Rn, am_unscaled64:$addr)>;
1831 def : Pat<(store (v2i32 FPR64:$Rn), am_unscaled64:$addr),
1832 (STURDi FPR64:$Rn, am_unscaled64:$addr)>;
1833 }
1834 def : Pat<(store (v1f64 FPR64:$Rn), am_unscaled64:$addr),
1835 (STURDi FPR64:$Rn, am_unscaled64:$addr)>;
1836 def : Pat<(store (v1i64 FPR64:$Rn), am_unscaled64:$addr),
1837 (STURDi FPR64:$Rn, am_unscaled64:$addr)>;
1839 // Match all 128-bit-wide stores whose type is compatible with FPR128
1840 let Predicates = [IsLE] in {
1841 // We must use ST1 to store vectors in big-endian.
1842 def : Pat<(store (v4f32 FPR128:$Rn), am_unscaled128:$addr),
1843 (STURQi FPR128:$Rn, am_unscaled128:$addr)>;
1844 def : Pat<(store (v2f64 FPR128:$Rn), am_unscaled128:$addr),
1845 (STURQi FPR128:$Rn, am_unscaled128:$addr)>;
1846 def : Pat<(store (v16i8 FPR128:$Rn), am_unscaled128:$addr),
1847 (STURQi FPR128:$Rn, am_unscaled128:$addr)>;
1848 def : Pat<(store (v8i16 FPR128:$Rn), am_unscaled128:$addr),
1849 (STURQi FPR128:$Rn, am_unscaled128:$addr)>;
1850 def : Pat<(store (v4i32 FPR128:$Rn), am_unscaled128:$addr),
1851 (STURQi FPR128:$Rn, am_unscaled128:$addr)>;
1852 def : Pat<(store (v2i64 FPR128:$Rn), am_unscaled128:$addr),
1853 (STURQi FPR128:$Rn, am_unscaled128:$addr)>;
1854 def : Pat<(store (v2f64 FPR128:$Rn), am_unscaled128:$addr),
1855 (STURQi FPR128:$Rn, am_unscaled128:$addr)>;
1856 }
1858 // unscaled i64 truncating stores
1859 def : Pat<(truncstorei32 GPR64:$Rt, am_unscaled32:$addr),
1860 (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_unscaled32:$addr)>;
1861 def : Pat<(truncstorei16 GPR64:$Rt, am_unscaled16:$addr),
1862 (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_unscaled16:$addr)>;
1863 def : Pat<(truncstorei8 GPR64:$Rt, am_unscaled8:$addr),
1864 (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_unscaled8:$addr)>;
1867 // STR mnemonics fall back to STUR for negative or unaligned offsets.
1868 def : InstAlias<"str $Rt, $addr", (STURXi GPR64:$Rt, am_unscaled_fb64:$addr)>;
1869 def : InstAlias<"str $Rt, $addr", (STURWi GPR32:$Rt, am_unscaled_fb32:$addr)>;
1870 def : InstAlias<"str $Rt, $addr", (STURBi FPR8:$Rt, am_unscaled_fb8:$addr)>;
1871 def : InstAlias<"str $Rt, $addr", (STURHi FPR16:$Rt, am_unscaled_fb16:$addr)>;
1872 def : InstAlias<"str $Rt, $addr", (STURSi FPR32:$Rt, am_unscaled_fb32:$addr)>;
1873 def : InstAlias<"str $Rt, $addr", (STURDi FPR64:$Rt, am_unscaled_fb64:$addr)>;
1874 def : InstAlias<"str $Rt, $addr", (STURQi FPR128:$Rt, am_unscaled_fb128:$addr)>;
1876 def : InstAlias<"strb $Rt, $addr", (STURBBi GPR32:$Rt, am_unscaled_fb8:$addr)>;
1877 def : InstAlias<"strh $Rt, $addr", (STURHHi GPR32:$Rt, am_unscaled_fb16:$addr)>;
1880 // (unscaled immediate, unprivileged)
1881 def STTRWi : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
1882 def STTRXi : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
1884 def STTRHi : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
1885 def STTRBi : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
1888 // (immediate pre-indexed)
1889 def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32, "str">;
1890 def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64, "str">;
1891 def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8, "str">;
1892 def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16, "str">;
1893 def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32, "str">;
1894 def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64, "str">;
1895 def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128, "str">;
1897 def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32, "strb">;
1898 def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32, "strh">;
1900 // ISel pseudos and patterns. See expanded comment on StorePreIdxPseudo.
1901 defm STRQpre : StorePreIdxPseudo<FPR128, f128, pre_store>;
1902 defm STRDpre : StorePreIdxPseudo<FPR64, f64, pre_store>;
1903 defm STRSpre : StorePreIdxPseudo<FPR32, f32, pre_store>;
1904 defm STRXpre : StorePreIdxPseudo<GPR64, i64, pre_store>;
1905 defm STRWpre : StorePreIdxPseudo<GPR32, i32, pre_store>;
1906 defm STRHHpre : StorePreIdxPseudo<GPR32, i32, pre_truncsti16>;
1907 defm STRBBpre : StorePreIdxPseudo<GPR32, i32, pre_truncsti8>;
1909 def : Pat<(pre_truncsti32 GPR64:$Rt, am_noindex:$addr, simm9:$off),
1910 (STRWpre_isel (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_noindex:$addr,
1911 simm9:$off)>;
1912 def : Pat<(pre_truncsti16 GPR64:$Rt, am_noindex:$addr, simm9:$off),
1913 (STRHHpre_isel (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_noindex:$addr,
1914 simm9:$off)>;
1915 def : Pat<(pre_truncsti8 GPR64:$Rt, am_noindex:$addr, simm9:$off),
1916 (STRBBpre_isel (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_noindex:$addr,
1917 simm9:$off)>;
1919 def : Pat<(pre_store (v8i8 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1920 (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1921 def : Pat<(pre_store (v4i16 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1922 (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1923 def : Pat<(pre_store (v2i32 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1924 (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1925 def : Pat<(pre_store (v2f32 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1926 (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1927 def : Pat<(pre_store (v1i64 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1928 (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1929 def : Pat<(pre_store (v1f64 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1930 (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1932 def : Pat<(pre_store (v16i8 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1933 (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1934 def : Pat<(pre_store (v8i16 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1935 (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1936 def : Pat<(pre_store (v4i32 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1937 (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1938 def : Pat<(pre_store (v4f32 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1939 (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1940 def : Pat<(pre_store (v2i64 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1941 (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1942 def : Pat<(pre_store (v2f64 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1943 (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1946 // (immediate post-indexed)
1947 def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32, "str">;
1948 def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64, "str">;
1949 def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8, "str">;
1950 def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16, "str">;
1951 def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32, "str">;
1952 def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64, "str">;
1953 def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128, "str">;
1955 def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32, "strb">;
1956 def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32, "strh">;
1958 // ISel pseudos and patterns. See expanded comment on StorePostIdxPseudo.
1959 defm STRQpost : StorePostIdxPseudo<FPR128, f128, post_store, STRQpost>;
1960 defm STRDpost : StorePostIdxPseudo<FPR64, f64, post_store, STRDpost>;
1961 defm STRSpost : StorePostIdxPseudo<FPR32, f32, post_store, STRSpost>;
1962 defm STRXpost : StorePostIdxPseudo<GPR64, i64, post_store, STRXpost>;
1963 defm STRWpost : StorePostIdxPseudo<GPR32, i32, post_store, STRWpost>;
1964 defm STRHHpost : StorePostIdxPseudo<GPR32, i32, post_truncsti16, STRHHpost>;
1965 defm STRBBpost : StorePostIdxPseudo<GPR32, i32, post_truncsti8, STRBBpost>;
1967 def : Pat<(post_truncsti32 GPR64:$Rt, am_noindex:$addr, simm9:$off),
1968 (STRWpost_isel (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_noindex:$addr,
1969 simm9:$off)>;
1970 def : Pat<(post_truncsti16 GPR64:$Rt, am_noindex:$addr, simm9:$off),
1971 (STRHHpost_isel (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_noindex:$addr,
1972 simm9:$off)>;
1973 def : Pat<(post_truncsti8 GPR64:$Rt, am_noindex:$addr, simm9:$off),
1974 (STRBBpost_isel (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_noindex:$addr,
1975 simm9:$off)>;
1977 def : Pat<(post_store (v8i8 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1978 (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1979 def : Pat<(post_store (v4i16 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1980 (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1981 def : Pat<(post_store (v2i32 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1982 (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1983 def : Pat<(post_store (v2f32 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1984 (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1985 def : Pat<(post_store (v1i64 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1986 (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1987 def : Pat<(post_store (v1f64 FPR64:$Rt), am_noindex:$addr, simm9:$off),
1988 (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
1990 def : Pat<(post_store (v16i8 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1991 (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1992 def : Pat<(post_store (v8i16 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1993 (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1994 def : Pat<(post_store (v4i32 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1995 (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1996 def : Pat<(post_store (v4f32 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1997 (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
1998 def : Pat<(post_store (v2i64 FPR128:$Rt), am_noindex:$addr, simm9:$off),
1999 (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
2000 def : Pat<(post_store (v2f64 FPR128:$Rt), am_noindex:$addr, simm9:$off),
2001 (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
2003 //===----------------------------------------------------------------------===//
2004 // Load/store exclusive instructions.
2005 //===----------------------------------------------------------------------===//
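// LDXR/LDAXR and STXR/STLXR form exclusive-monitor pairs used to build atomic
// read-modify-write sequences; the store writes 0 to its status register on
// success and 1 if the exclusive monitor was lost. LDAR/STLR are the
// non-exclusive load-acquire/store-release forms.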
2007 def LDARW : LoadAcquire <0b10, 1, 1, 0, 1, GPR32, "ldar">;
2008 def LDARX : LoadAcquire <0b11, 1, 1, 0, 1, GPR64, "ldar">;
2009 def LDARB : LoadAcquire <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
2010 def LDARH : LoadAcquire <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
2012 def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
2013 def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
2014 def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
2015 def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
2017 def LDXRW : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
2018 def LDXRX : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
2019 def LDXRB : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
2020 def LDXRH : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
2022 def STLRW : StoreRelease <0b10, 1, 0, 0, 1, GPR32, "stlr">;
2023 def STLRX : StoreRelease <0b11, 1, 0, 0, 1, GPR64, "stlr">;
2024 def STLRB : StoreRelease <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
2025 def STLRH : StoreRelease <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
2027 def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
2028 def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
2029 def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
2030 def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
2032 def STXRW : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
2033 def STXRX : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
2034 def STXRB : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
2035 def STXRH : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
2037 def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
2038 def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
2040 def LDXPW : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
2041 def LDXPX : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
2043 def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
2044 def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
2046 def STXPW : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
2047 def STXPX : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
2049 //===----------------------------------------------------------------------===//
2050 // Scaled floating point to integer conversion instructions.
2051 //===----------------------------------------------------------------------===//
2053 defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_arm64_neon_fcvtas>;
2054 defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_arm64_neon_fcvtau>;
2055 defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_arm64_neon_fcvtms>;
2056 defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_arm64_neon_fcvtmu>;
2057 defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_arm64_neon_fcvtns>;
2058 defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_arm64_neon_fcvtnu>;
2059 defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_arm64_neon_fcvtps>;
2060 defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_arm64_neon_fcvtpu>;
2061 defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
2062 defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
2063 defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
2064 defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
2065 let isCodeGenOnly = 1 in {
2066 defm FCVTZS_Int : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", int_arm64_neon_fcvtzs>;
2067 defm FCVTZU_Int : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", int_arm64_neon_fcvtzu>;
2068 defm FCVTZS_Int : FPToIntegerScaled<0b11, 0b000, "fcvtzs", int_arm64_neon_fcvtzs>;
2069 defm FCVTZU_Int : FPToIntegerScaled<0b11, 0b001, "fcvtzu", int_arm64_neon_fcvtzu>;
2070 }
2072 //===----------------------------------------------------------------------===//
2073 // Scaled integer to floating point conversion instructions.
2074 //===----------------------------------------------------------------------===//
2076 defm SCVTF : IntegerToFP<0, "scvtf", sint_to_fp>;
2077 defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;
2079 //===----------------------------------------------------------------------===//
2080 // Unscaled integer to floating point conversion instruction.
2081 //===----------------------------------------------------------------------===//
2083 defm FMOV : UnscaledConversion<"fmov">;
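// The patterns below materialize +0.0 with an FMOV from the integer zero
// register; subtargets with zero-cycle zeroing use a different expansion,
// hence the NoZCZ guard.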
2085 def : Pat<(f32 (fpimm0)), (FMOVWSr WZR)>, Requires<[NoZCZ]>;
2086 def : Pat<(f64 (fpimm0)), (FMOVXDr XZR)>, Requires<[NoZCZ]>;
2088 //===----------------------------------------------------------------------===//
2089 // Floating point conversion instruction.
2090 //===----------------------------------------------------------------------===//
2092 defm FCVT : FPConversion<"fcvt">;
2094 def : Pat<(f32_to_f16 FPR32:$Rn),
2095 (i32 (COPY_TO_REGCLASS
2096 (f32 (SUBREG_TO_REG (i32 0), (FCVTHSr FPR32:$Rn), hsub)),
2097 GPR32))>;
2099 def FCVTSHpseudo : Pseudo<(outs FPR32:$Rd), (ins FPR32:$Rn),
2100 [(set (f32 FPR32:$Rd), (f16_to_f32 i32:$Rn))]>;
2102 //===----------------------------------------------------------------------===//
2103 // Floating point single operand instructions.
2104 //===----------------------------------------------------------------------===//
2106 defm FABS : SingleOperandFPData<0b0001, "fabs", fabs>;
2107 defm FMOV : SingleOperandFPData<0b0000, "fmov">;
2108 defm FNEG : SingleOperandFPData<0b0010, "fneg", fneg>;
2109 defm FRINTA : SingleOperandFPData<0b1100, "frinta", frnd>;
2110 defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
2111 defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
2112 defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_arm64_neon_frintn>;
2113 defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
2115 def : Pat<(v1f64 (int_arm64_neon_frintn (v1f64 FPR64:$Rn))),
2116 (FRINTNDr FPR64:$Rn)>;
2118 // FRINTX is inserted to set the flags as required by FENV_ACCESS ON behavior
2119 // in the C spec. Setting hasSideEffects ensures it is not DCE'd.
2120 // <rdar://problem/13715968>
2121 // TODO: We should really model the FPSR flags correctly. This is really ugly.
2122 let hasSideEffects = 1 in {
2123 defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
2124 }
2126 defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
2128 let SchedRW = [WriteFDiv] in {
2129 defm FSQRT : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
2130 }
2132 //===----------------------------------------------------------------------===//
2133 // Floating point two operand instructions.
2134 //===----------------------------------------------------------------------===//
2136 defm FADD : TwoOperandFPData<0b0010, "fadd", fadd>;
2137 let SchedRW = [WriteFDiv] in {
2138 defm FDIV : TwoOperandFPData<0b0001, "fdiv", fdiv>;
2139 }
2140 defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", int_arm64_neon_fmaxnm>;
2141 defm FMAX : TwoOperandFPData<0b0100, "fmax", ARM64fmax>;
2142 defm FMINNM : TwoOperandFPData<0b0111, "fminnm", int_arm64_neon_fminnm>;
2143 defm FMIN : TwoOperandFPData<0b0101, "fmin", ARM64fmin>;
2144 let SchedRW = [WriteFMul] in {
2145 defm FMUL : TwoOperandFPData<0b0000, "fmul", fmul>;
2146 defm FNMUL : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
2147 }
2148 defm FSUB : TwoOperandFPData<0b0011, "fsub", fsub>;
2150 def : Pat<(v1f64 (ARM64fmax (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2151 (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
2152 def : Pat<(v1f64 (ARM64fmin (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2153 (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
2154 def : Pat<(v1f64 (int_arm64_neon_fmaxnm (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2155 (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
2156 def : Pat<(v1f64 (int_arm64_neon_fminnm (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2157 (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
2159 //===----------------------------------------------------------------------===//
2160 // Floating point three operand instructions.
2161 //===----------------------------------------------------------------------===//
2163 defm FMADD : ThreeOperandFPData<0, 0, "fmadd", fma>;
2164 defm FMSUB : ThreeOperandFPData<0, 1, "fmsub",
2165 TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
2166 defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
2167 TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
2168 defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
2169 TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
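// In scalar terms: FMADD = Ra + Rn*Rm, FMSUB = Ra - Rn*Rm,
// FNMADD = -(Ra + Rn*Rm), FNMSUB = Rn*Rm - Ra, where Ra is the accumulator.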
2171 // The following def pats catch the case where the LHS of an FMA is negated.
2172 // The TriOpFrag above catches the case where the middle operand is negated.
2174 // N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
2175 // the NEON variant.
2176 def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
2177 (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2179 def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
2180 (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2182 // We handled -(a + b*c) for FNMADD above, now it's time for "(-a) + (-b)*c" and
2183 // "(-a) + b*(-c)".
2184 def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
2185 (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2187 def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
2188 (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2190 def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
2191 (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2193 def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
2194 (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2196 //===----------------------------------------------------------------------===//
2197 // Floating point comparison instructions.
2198 //===----------------------------------------------------------------------===//
2200 defm FCMPE : FPComparison<1, "fcmpe">;
2201 defm FCMP : FPComparison<0, "fcmp", ARM64fcmp>;
2203 //===----------------------------------------------------------------------===//
2204 // Floating point conditional comparison instructions.
2205 //===----------------------------------------------------------------------===//
2207 defm FCCMPE : FPCondComparison<1, "fccmpe">;
2208 defm FCCMP : FPCondComparison<0, "fccmp">;
2210 //===----------------------------------------------------------------------===//
2211 // Floating point conditional select instruction.
2212 //===----------------------------------------------------------------------===//
2214 defm FCSEL : FPCondSelect<"fcsel">;
2216 // CSEL instructions providing f128 types need to be handled by a
2217 // pseudo-instruction since the eventual code will need to introduce basic
2218 // blocks and control flow.
2219 def F128CSEL : Pseudo<(outs FPR128:$Rd),
2220 (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
2221 [(set (f128 FPR128:$Rd),
2222 (ARM64csel FPR128:$Rn, FPR128:$Rm,
2223 (i32 imm:$cond), NZCV))]> {
2224 let Uses = [NZCV];
2225 let usesCustomInserter = 1;
2226 }
2229 //===----------------------------------------------------------------------===//
2230 // Floating point immediate move.
2231 //===----------------------------------------------------------------------===//
2233 let isReMaterializable = 1 in {
2234 defm FMOV : FPMoveImmediate<"fmov">;
2235 }
2237 //===----------------------------------------------------------------------===//
2238 // Advanced SIMD two vector instructions.
2239 //===----------------------------------------------------------------------===//
2241 defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", int_arm64_neon_abs>;
2242 defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_arm64_neon_cls>;
2243 defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
2244 defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", ARM64cmeqz>;
2245 defm CMGE : SIMDCmpTwoVector<1, 0b01000, "cmge", ARM64cmgez>;
2246 defm CMGT : SIMDCmpTwoVector<0, 0b01000, "cmgt", ARM64cmgtz>;
2247 defm CMLE : SIMDCmpTwoVector<1, 0b01001, "cmle", ARM64cmlez>;
2248 defm CMLT : SIMDCmpTwoVector<0, 0b01010, "cmlt", ARM64cmltz>;
2249 defm CNT : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
2250 defm FABS : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;
2252 defm FCMEQ : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", ARM64fcmeqz>;
2253 defm FCMGE : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", ARM64fcmgez>;
2254 defm FCMGT : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", ARM64fcmgtz>;
2255 defm FCMLE : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", ARM64fcmlez>;
2256 defm FCMLT : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", ARM64fcmltz>;
2257 defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_arm64_neon_fcvtas>;
2258 defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_arm64_neon_fcvtau>;
2259 defm FCVTL : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
2260 def : Pat<(v4f32 (int_arm64_neon_vcvthf2fp (v4i16 V64:$Rn))),
2261 (FCVTLv4i16 V64:$Rn)>;
2262 def : Pat<(v4f32 (int_arm64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
2263 (i64 4)))),
2264 (FCVTLv8i16 V128:$Rn)>;
2265 def : Pat<(v2f64 (fextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
2266 def : Pat<(v2f64 (fextend (v2f32 (extract_subvector (v4f32 V128:$Rn),
2267 (i64 2))))),
2268 (FCVTLv4i32 V128:$Rn)>;
2270 defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_arm64_neon_fcvtms>;
2271 defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_arm64_neon_fcvtmu>;
2272 defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_arm64_neon_fcvtns>;
2273 defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_arm64_neon_fcvtnu>;
2274 defm FCVTN : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
2275 def : Pat<(v4i16 (int_arm64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
2276 (FCVTNv4i16 V128:$Rn)>;
2277 def : Pat<(concat_vectors V64:$Rd,
2278 (v4i16 (int_arm64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
2279 (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
2280 def : Pat<(v2f32 (fround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
2281 def : Pat<(concat_vectors V64:$Rd, (v2f32 (fround (v2f64 V128:$Rn)))),
2282 (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
2283 defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_arm64_neon_fcvtps>;
2284 defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_arm64_neon_fcvtpu>;
2285 defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
2286 int_arm64_neon_fcvtxn>;
2287 defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
2288 defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
2289 let isCodeGenOnly = 1 in {
2290 defm FCVTZS_Int : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs",
2291 int_arm64_neon_fcvtzs>;
2292 defm FCVTZU_Int : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu",
2293 int_arm64_neon_fcvtzu>;
2294 }
2295 defm FNEG : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
2296 defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_arm64_neon_frecpe>;
2297 defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", frnd>;
2298 defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
2299 defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
2300 defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_arm64_neon_frintn>;
2301 defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
2302 defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
2303 defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
2304 defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_arm64_neon_frsqrte>;
2305 defm FSQRT : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
2306 defm NEG : SIMDTwoVectorBHSD<1, 0b01011, "neg",
2307 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
2308 defm NOT : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
2309 // Aliases for MVN -> NOT.
2310 def : InstAlias<"mvn.8b $Vd, $Vn", (NOTv8i8 V64:$Vd, V64:$Vn)>;
2311 def : InstAlias<"mvn.16b $Vd, $Vn", (NOTv16i8 V128:$Vd, V128:$Vn)>;
2312 def : InstAlias<"mvn $Vd.8b, $Vn.8b", (NOTv8i8 V64:$Vd, V64:$Vn)>;
2313 def : InstAlias<"mvn $Vd.16b, $Vn.16b", (NOTv16i8 V128:$Vd, V128:$Vn)>;
2315 def : Pat<(ARM64neg (v8i8 V64:$Rn)), (NEGv8i8 V64:$Rn)>;
2316 def : Pat<(ARM64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
2317 def : Pat<(ARM64neg (v4i16 V64:$Rn)), (NEGv4i16 V64:$Rn)>;
2318 def : Pat<(ARM64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
2319 def : Pat<(ARM64neg (v2i32 V64:$Rn)), (NEGv2i32 V64:$Rn)>;
2320 def : Pat<(ARM64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
2321 def : Pat<(ARM64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;
2323 def : Pat<(ARM64not (v8i8 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2324 def : Pat<(ARM64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2325 def : Pat<(ARM64not (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2326 def : Pat<(ARM64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2327 def : Pat<(ARM64not (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2328 def : Pat<(ARM64not (v1i64 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2329 def : Pat<(ARM64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2330 def : Pat<(ARM64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2332 def : Pat<(vnot (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2333 def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2334 def : Pat<(vnot (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2335 def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2336 def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2338 defm RBIT : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_arm64_neon_rbit>;
2339 defm REV16 : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", ARM64rev16>;
2340 defm REV32 : SIMDTwoVectorBH<1, 0b00000, "rev32", ARM64rev32>;
2341 defm REV64 : SIMDTwoVectorBHS<0, 0b00000, "rev64", ARM64rev64>;
2342 defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
2343 BinOpFrag<(add node:$LHS, (int_arm64_neon_saddlp node:$RHS))> >;
2344 defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_arm64_neon_saddlp>;
2345 defm SCVTF : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
2346 defm SHLL : SIMDVectorLShiftLongBySizeBHS;
2347 defm SQABS : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_arm64_neon_sqabs>;
2348 defm SQNEG : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_arm64_neon_sqneg>;
2349 defm SQXTN : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_arm64_neon_sqxtn>;
2350 defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_arm64_neon_sqxtun>;
2351 defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_arm64_neon_suqadd>;
2352 defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
2353 BinOpFrag<(add node:$LHS, (int_arm64_neon_uaddlp node:$RHS))> >;
2354 defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
2355 int_arm64_neon_uaddlp>;
2356 defm UCVTF : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
2357 defm UQXTN : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_arm64_neon_uqxtn>;
2358 defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_arm64_neon_urecpe>;
2359 defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_arm64_neon_ursqrte>;
2360 defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_arm64_neon_usqadd>;
2361 defm XTN : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
2363 def : Pat<(v2f32 (ARM64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
2364 def : Pat<(v4f32 (ARM64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
2366 // Patterns for vector long shift (by element width). These need to match all
2367 // three of zext, sext and anyext so it's easier to pull the patterns out of the
2368 // definition.
2369 multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
2370 def : Pat<(ARM64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
2371 (SHLLv8i8 V64:$Rn)>;
2372 def : Pat<(ARM64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
2373 (SHLLv16i8 V128:$Rn)>;
2374 def : Pat<(ARM64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
2375 (SHLLv4i16 V64:$Rn)>;
2376 def : Pat<(ARM64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
2377 (SHLLv8i16 V128:$Rn)>;
2378 def : Pat<(ARM64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
2379 (SHLLv2i32 V64:$Rn)>;
2380 def : Pat<(ARM64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
2381 (SHLLv4i32 V128:$Rn)>;
2382 }
2384 defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
2385 defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
2386 defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
2388 //===----------------------------------------------------------------------===//
2389 // Advanced SIMD three vector instructions.
2390 //===----------------------------------------------------------------------===//
2392 defm ADD : SIMDThreeSameVector<0, 0b10000, "add", add>;
2393 defm ADDP : SIMDThreeSameVector<0, 0b10111, "addp", int_arm64_neon_addp>;
2394 defm CMEQ : SIMDThreeSameVector<1, 0b10001, "cmeq", ARM64cmeq>;
2395 defm CMGE : SIMDThreeSameVector<0, 0b00111, "cmge", ARM64cmge>;
2396 defm CMGT : SIMDThreeSameVector<0, 0b00110, "cmgt", ARM64cmgt>;
2397 defm CMHI : SIMDThreeSameVector<1, 0b00110, "cmhi", ARM64cmhi>;
2398 defm CMHS : SIMDThreeSameVector<1, 0b00111, "cmhs", ARM64cmhs>;
2399 defm CMTST : SIMDThreeSameVector<0, 0b10001, "cmtst", ARM64cmtst>;
2400 defm FABD : SIMDThreeSameVectorFP<1,1,0b11010,"fabd", int_arm64_neon_fabd>;
2401 defm FACGE : SIMDThreeSameVectorFPCmp<1,0,0b11101,"facge",int_arm64_neon_facge>;
2402 defm FACGT : SIMDThreeSameVectorFPCmp<1,1,0b11101,"facgt",int_arm64_neon_facgt>;
2403 defm FADDP : SIMDThreeSameVectorFP<1,0,0b11010,"faddp",int_arm64_neon_addp>;
2404 defm FADD : SIMDThreeSameVectorFP<0,0,0b11010,"fadd", fadd>;
2405 defm FCMEQ : SIMDThreeSameVectorFPCmp<0, 0, 0b11100, "fcmeq", ARM64fcmeq>;
2406 defm FCMGE : SIMDThreeSameVectorFPCmp<1, 0, 0b11100, "fcmge", ARM64fcmge>;
2407 defm FCMGT : SIMDThreeSameVectorFPCmp<1, 1, 0b11100, "fcmgt", ARM64fcmgt>;
2408 defm FDIV : SIMDThreeSameVectorFP<1,0,0b11111,"fdiv", fdiv>;
2409 defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b11000,"fmaxnmp", int_arm64_neon_fmaxnmp>;
2410 defm FMAXNM : SIMDThreeSameVectorFP<0,0,0b11000,"fmaxnm", int_arm64_neon_fmaxnm>;
2411 defm FMAXP : SIMDThreeSameVectorFP<1,0,0b11110,"fmaxp", int_arm64_neon_fmaxp>;
2412 defm FMAX : SIMDThreeSameVectorFP<0,0,0b11110,"fmax", ARM64fmax>;
2413 defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b11000,"fminnmp", int_arm64_neon_fminnmp>;
2414 defm FMINNM : SIMDThreeSameVectorFP<0,1,0b11000,"fminnm", int_arm64_neon_fminnm>;
2415 defm FMINP : SIMDThreeSameVectorFP<1,1,0b11110,"fminp", int_arm64_neon_fminp>;
2416 defm FMIN : SIMDThreeSameVectorFP<0,1,0b11110,"fmin", ARM64fmin>;
2418 // NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
2419 // instruction expects the addend first, while the fma intrinsic puts it last.
2420 defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b11001, "fmla",
2421 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
2422 defm FMLS : SIMDThreeSameVectorFPTied<0, 1, 0b11001, "fmls",
2423 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
2425 // The following def pats catch the case where the LHS of an FMA is negated.
2426 // The TriOpFrag above catches the case where the middle operand is negated.
2427 def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
2428 (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;
2430 def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
2431 (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;
2433 def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
2434 (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;
2436 defm FMULX : SIMDThreeSameVectorFP<0,0,0b11011,"fmulx", int_arm64_neon_fmulx>;
2437 defm FMUL : SIMDThreeSameVectorFP<1,0,0b11011,"fmul", fmul>;
2438 defm FRECPS : SIMDThreeSameVectorFP<0,0,0b11111,"frecps", int_arm64_neon_frecps>;
2439 defm FRSQRTS : SIMDThreeSameVectorFP<0,1,0b11111,"frsqrts", int_arm64_neon_frsqrts>;
2440 defm FSUB : SIMDThreeSameVectorFP<0,1,0b11010,"fsub", fsub>;
2441 defm MLA : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla",
2442 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))> >;
2443 defm MLS : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls",
2444 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))> >;
2445 defm MUL : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
2446 defm PMUL : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_arm64_neon_pmul>;
2447 defm SABA : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
2448 TriOpFrag<(add node:$LHS, (int_arm64_neon_sabd node:$MHS, node:$RHS))> >;
2449 defm SABD : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_arm64_neon_sabd>;
2450 defm SHADD : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_arm64_neon_shadd>;
2451 defm SHSUB : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_arm64_neon_shsub>;
2452 defm SMAXP : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_arm64_neon_smaxp>;
2453 defm SMAX : SIMDThreeSameVectorBHS<0,0b01100,"smax", int_arm64_neon_smax>;
2454 defm SMINP : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_arm64_neon_sminp>;
2455 defm SMIN : SIMDThreeSameVectorBHS<0,0b01101,"smin", int_arm64_neon_smin>;
2456 defm SQADD : SIMDThreeSameVector<0,0b00001,"sqadd", int_arm64_neon_sqadd>;
2457 defm SQDMULH : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_arm64_neon_sqdmulh>;
2458 defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_arm64_neon_sqrdmulh>;
2459 defm SQRSHL : SIMDThreeSameVector<0,0b01011,"sqrshl", int_arm64_neon_sqrshl>;
2460 defm SQSHL : SIMDThreeSameVector<0,0b01001,"sqshl", int_arm64_neon_sqshl>;
2461 defm SQSUB : SIMDThreeSameVector<0,0b00101,"sqsub", int_arm64_neon_sqsub>;
2462 defm SRHADD : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_arm64_neon_srhadd>;
2463 defm SRSHL : SIMDThreeSameVector<0,0b01010,"srshl", int_arm64_neon_srshl>;
2464 defm SSHL : SIMDThreeSameVector<0,0b01000,"sshl", int_arm64_neon_sshl>;
2465 defm SUB : SIMDThreeSameVector<1,0b10000,"sub", sub>;
2466 defm UABA : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
2467 TriOpFrag<(add node:$LHS, (int_arm64_neon_uabd node:$MHS, node:$RHS))> >;
2468 defm UABD : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_arm64_neon_uabd>;
2469 defm UHADD : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_arm64_neon_uhadd>;
2470 defm UHSUB : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_arm64_neon_uhsub>;
2471 defm UMAXP : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_arm64_neon_umaxp>;
2472 defm UMAX : SIMDThreeSameVectorBHS<1,0b01100,"umax", int_arm64_neon_umax>;
2473 defm UMINP : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_arm64_neon_uminp>;
2474 defm UMIN : SIMDThreeSameVectorBHS<1,0b01101,"umin", int_arm64_neon_umin>;
2475 defm UQADD : SIMDThreeSameVector<1,0b00001,"uqadd", int_arm64_neon_uqadd>;
2476 defm UQRSHL : SIMDThreeSameVector<1,0b01011,"uqrshl", int_arm64_neon_uqrshl>;
2477 defm UQSHL : SIMDThreeSameVector<1,0b01001,"uqshl", int_arm64_neon_uqshl>;
2478 defm UQSUB : SIMDThreeSameVector<1,0b00101,"uqsub", int_arm64_neon_uqsub>;
2479 defm URHADD : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_arm64_neon_urhadd>;
2480 defm URSHL : SIMDThreeSameVector<1,0b01010,"urshl", int_arm64_neon_urshl>;
2481 defm USHL : SIMDThreeSameVector<1,0b01000,"ushl", int_arm64_neon_ushl>;
2483 defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
2484 defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
2485 BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
2486 defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
2487 defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", ARM64bit>;
2488 defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
2489 TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
2490 defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
2491 defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
2492 BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
2493 defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
2495 def : Pat<(ARM64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
2496 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
2497 def : Pat<(ARM64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
2498 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
2499 def : Pat<(ARM64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
2500 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
2501 def : Pat<(ARM64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
2502 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
2504 def : Pat<(ARM64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
2505 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
2506 def : Pat<(ARM64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
2507 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
2508 def : Pat<(ARM64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
2509 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
2510 def : Pat<(ARM64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
2511 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
2513 // FIXME: the .16b and .8b variants should be emitted by the
2514 // AsmWriter. TableGen's AsmWriter-generator doesn't deal with variant syntaxes
2515 // in aliases yet though.
2516 def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
2517 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
2518 def : InstAlias<"{mov\t$dst.8h, $src.8h|mov.8h\t$dst, $src}",
2519 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
2520 def : InstAlias<"{mov\t$dst.4s, $src.4s|mov.4s\t$dst, $src}",
2521 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
2522 def : InstAlias<"{mov\t$dst.2d, $src.2d|mov.2d\t$dst, $src}",
2523 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
2525 def : InstAlias<"{mov\t$dst.8b, $src.8b|mov.8b\t$dst, $src}",
2526 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
2527 def : InstAlias<"{mov\t$dst.4h, $src.4h|mov.4h\t$dst, $src}",
2528 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
2529 def : InstAlias<"{mov\t$dst.2s, $src.2s|mov.2s\t$dst, $src}",
2530 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
2531 def : InstAlias<"{mov\t$dst.1d, $src.1d|mov.1d\t$dst, $src}",
2532 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
2534 def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
2535 "|cmls.8b\t$dst, $src1, $src2}",
2536 (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
2537 def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
2538 "|cmls.16b\t$dst, $src1, $src2}",
2539 (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
2540 def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
2541 "|cmls.4h\t$dst, $src1, $src2}",
2542 (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
2543 def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
2544 "|cmls.8h\t$dst, $src1, $src2}",
2545 (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
2546 def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
2547 "|cmls.2s\t$dst, $src1, $src2}",
2548 (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
2549 def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
2550 "|cmls.4s\t$dst, $src1, $src2}",
2551 (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
2552 def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
2553 "|cmls.2d\t$dst, $src1, $src2}",
2554 (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
2556 def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
2557 "|cmlo.8b\t$dst, $src1, $src2}",
2558 (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
2559 def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
2560 "|cmlo.16b\t$dst, $src1, $src2}",
2561 (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
2562 def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
2563 "|cmlo.4h\t$dst, $src1, $src2}",
2564 (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
2565 def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
2566 "|cmlo.8h\t$dst, $src1, $src2}",
2567 (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
2568 def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
2569 "|cmlo.2s\t$dst, $src1, $src2}",
2570 (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
2571 def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
2572 "|cmlo.4s\t$dst, $src1, $src2}",
2573 (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
2574 def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
2575 "|cmlo.2d\t$dst, $src1, $src2}",
2576 (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
2578 def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
2579 "|cmle.8b\t$dst, $src1, $src2}",
2580 (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
2581 def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
2582 "|cmle.16b\t$dst, $src1, $src2}",
2583 (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
2584 def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
2585 "|cmle.4h\t$dst, $src1, $src2}",
2586 (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
2587 def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
2588 "|cmle.8h\t$dst, $src1, $src2}",
2589 (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
2590 def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
2591 "|cmle.2s\t$dst, $src1, $src2}",
2592 (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
2593 def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
2594 "|cmle.4s\t$dst, $src1, $src2}",
2595 (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
2596 def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
2597 "|cmle.2d\t$dst, $src1, $src2}",
2598 (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
2600 def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
2601 "|cmlt.8b\t$dst, $src1, $src2}",
2602 (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
2603 def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
2604 "|cmlt.16b\t$dst, $src1, $src2}",
2605 (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
2606 def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
2607 "|cmlt.4h\t$dst, $src1, $src2}",
2608 (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
2609 def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
2610 "|cmlt.8h\t$dst, $src1, $src2}",
2611 (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
2612 def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
2613 "|cmlt.2s\t$dst, $src1, $src2}",
2614 (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
2615 def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
2616 "|cmlt.4s\t$dst, $src1, $src2}",
2617 (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
2618 def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
2619 "|cmlt.2d\t$dst, $src1, $src2}",
2620 (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
2622 def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
2623 "|fcmle.2s\t$dst, $src1, $src2}",
2624 (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
2625 def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
2626 "|fcmle.4s\t$dst, $src1, $src2}",
2627 (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
2628 def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
2629 "|fcmle.2d\t$dst, $src1, $src2}",
2630 (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
2632 def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
2633 "|fcmlt.2s\t$dst, $src1, $src2}",
2634 (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
2635 def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
2636 "|fcmlt.4s\t$dst, $src1, $src2}",
2637 (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
2638 def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
2639 "|fcmlt.2d\t$dst, $src1, $src2}",
2640 (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
2642 def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
2643 "|facle.2s\t$dst, $src1, $src2}",
2644 (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
2645 def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
2646 "|facle.4s\t$dst, $src1, $src2}",
2647 (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
2648 def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
2649 "|facle.2d\t$dst, $src1, $src2}",
2650 (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
2652 def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
2653 "|faclt.2s\t$dst, $src1, $src2}",
2654 (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
2655 def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
2656 "|faclt.4s\t$dst, $src1, $src2}",
2657 (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
2658 def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
2659 "|faclt.2d\t$dst, $src1, $src2}",
2660 (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
2662 //===----------------------------------------------------------------------===//
2663 // Advanced SIMD three scalar instructions.
2664 //===----------------------------------------------------------------------===//
2666 defm ADD : SIMDThreeScalarD<0, 0b10000, "add", add>;
2667 defm CMEQ : SIMDThreeScalarD<1, 0b10001, "cmeq", ARM64cmeq>;
2668 defm CMGE : SIMDThreeScalarD<0, 0b00111, "cmge", ARM64cmge>;
2669 defm CMGT : SIMDThreeScalarD<0, 0b00110, "cmgt", ARM64cmgt>;
2670 defm CMHI : SIMDThreeScalarD<1, 0b00110, "cmhi", ARM64cmhi>;
2671 defm CMHS : SIMDThreeScalarD<1, 0b00111, "cmhs", ARM64cmhs>;
2672 defm CMTST : SIMDThreeScalarD<0, 0b10001, "cmtst", ARM64cmtst>;
2673 defm FABD : SIMDThreeScalarSD<1, 1, 0b11010, "fabd", int_arm64_sisd_fabd>;
2674 def : Pat<(v1f64 (int_arm64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2675 (FABD64 FPR64:$Rn, FPR64:$Rm)>;
2676 defm FACGE : SIMDThreeScalarFPCmp<1, 0, 0b11101, "facge",
2677 int_arm64_neon_facge>;
2678 defm FACGT : SIMDThreeScalarFPCmp<1, 1, 0b11101, "facgt",
2679 int_arm64_neon_facgt>;
2680 defm FCMEQ : SIMDThreeScalarFPCmp<0, 0, 0b11100, "fcmeq", ARM64fcmeq>;
2681 defm FCMGE : SIMDThreeScalarFPCmp<1, 0, 0b11100, "fcmge", ARM64fcmge>;
2682 defm FCMGT : SIMDThreeScalarFPCmp<1, 1, 0b11100, "fcmgt", ARM64fcmgt>;
2683 defm FMULX : SIMDThreeScalarSD<0, 0, 0b11011, "fmulx", int_arm64_neon_fmulx>;
2684 defm FRECPS : SIMDThreeScalarSD<0, 0, 0b11111, "frecps", int_arm64_neon_frecps>;
2685 defm FRSQRTS : SIMDThreeScalarSD<0, 1, 0b11111, "frsqrts", int_arm64_neon_frsqrts>;
2686 defm SQADD : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_arm64_neon_sqadd>;
2687 defm SQDMULH : SIMDThreeScalarHS< 0, 0b10110, "sqdmulh", int_arm64_neon_sqdmulh>;
2688 defm SQRDMULH : SIMDThreeScalarHS< 1, 0b10110, "sqrdmulh", int_arm64_neon_sqrdmulh>;
2689 defm SQRSHL : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_arm64_neon_sqrshl>;
2690 defm SQSHL : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_arm64_neon_sqshl>;
2691 defm SQSUB : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_arm64_neon_sqsub>;
2692 defm SRSHL : SIMDThreeScalarD< 0, 0b01010, "srshl", int_arm64_neon_srshl>;
2693 defm SSHL : SIMDThreeScalarD< 0, 0b01000, "sshl", int_arm64_neon_sshl>;
2694 defm SUB : SIMDThreeScalarD< 1, 0b10000, "sub", sub>;
2695 defm UQADD : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_arm64_neon_uqadd>;
2696 defm UQRSHL : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_arm64_neon_uqrshl>;
2697 defm UQSHL : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_arm64_neon_uqshl>;
2698 defm UQSUB : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_arm64_neon_uqsub>;
2699 defm URSHL : SIMDThreeScalarD< 1, 0b01010, "urshl", int_arm64_neon_urshl>;
2700 defm USHL : SIMDThreeScalarD< 1, 0b01000, "ushl", int_arm64_neon_ushl>;
2702 def : InstAlias<"cmls $dst, $src1, $src2",
2703 (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1)>;
2704 def : InstAlias<"cmle $dst, $src1, $src2",
2705 (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1)>;
2706 def : InstAlias<"cmlo $dst, $src1, $src2",
2707 (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1)>;
2708 def : InstAlias<"cmlt $dst, $src1, $src2",
2709 (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1)>;
2710 def : InstAlias<"fcmle $dst, $src1, $src2",
2711 (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1)>;
2712 def : InstAlias<"fcmle $dst, $src1, $src2",
2713 (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1)>;
2714 def : InstAlias<"fcmlt $dst, $src1, $src2",
2715 (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1)>;
2716 def : InstAlias<"fcmlt $dst, $src1, $src2",
2717 (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1)>;
2718 def : InstAlias<"facle $dst, $src1, $src2",
2719 (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1)>;
2720 def : InstAlias<"facle $dst, $src1, $src2",
2721 (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1)>;
2722 def : InstAlias<"faclt $dst, $src1, $src2",
2723 (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1)>;
2724 def : InstAlias<"faclt $dst, $src1, $src2",
2725 (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1)>;
2727 //===----------------------------------------------------------------------===//
2728 // Advanced SIMD three scalar instructions (mixed operands).
2729 //===----------------------------------------------------------------------===//
2730 defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
2731 int_arm64_neon_sqdmulls_scalar>;
2732 defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
2733 defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;
2735 def : Pat<(i64 (int_arm64_neon_sqadd (i64 FPR64:$Rd),
2736 (i64 (int_arm64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
2737 (i32 FPR32:$Rm))))),
2738 (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
2739 def : Pat<(i64 (int_arm64_neon_sqsub (i64 FPR64:$Rd),
2740 (i64 (int_arm64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
2741 (i32 FPR32:$Rm))))),
2742 (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
2744 //===----------------------------------------------------------------------===//
2745 // Advanced SIMD two scalar instructions.
2746 //===----------------------------------------------------------------------===//
2748 defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", int_arm64_neon_abs>;
2749 defm CMEQ : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", ARM64cmeqz>;
2750 defm CMGE : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", ARM64cmgez>;
2751 defm CMGT : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", ARM64cmgtz>;
2752 defm CMLE : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", ARM64cmlez>;
2753 defm CMLT : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", ARM64cmltz>;
2754 defm FCMEQ : SIMDCmpTwoScalarSD<0, 1, 0b01101, "fcmeq", ARM64fcmeqz>;
2755 defm FCMGE : SIMDCmpTwoScalarSD<1, 1, 0b01100, "fcmge", ARM64fcmgez>;
2756 defm FCMGT : SIMDCmpTwoScalarSD<0, 1, 0b01100, "fcmgt", ARM64fcmgtz>;
2757 defm FCMLE : SIMDCmpTwoScalarSD<1, 1, 0b01101, "fcmle", ARM64fcmlez>;
2758 defm FCMLT : SIMDCmpTwoScalarSD<0, 1, 0b01110, "fcmlt", ARM64fcmltz>;
2759 defm FCVTAS : SIMDTwoScalarSD< 0, 0, 0b11100, "fcvtas">;
2760 defm FCVTAU : SIMDTwoScalarSD< 1, 0, 0b11100, "fcvtau">;
2761 defm FCVTMS : SIMDTwoScalarSD< 0, 0, 0b11011, "fcvtms">;
2762 defm FCVTMU : SIMDTwoScalarSD< 1, 0, 0b11011, "fcvtmu">;
2763 defm FCVTNS : SIMDTwoScalarSD< 0, 0, 0b11010, "fcvtns">;
2764 defm FCVTNU : SIMDTwoScalarSD< 1, 0, 0b11010, "fcvtnu">;
2765 defm FCVTPS : SIMDTwoScalarSD< 0, 1, 0b11010, "fcvtps">;
2766 defm FCVTPU : SIMDTwoScalarSD< 1, 1, 0b11010, "fcvtpu">;
2767 def FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
2768 defm FCVTZS : SIMDTwoScalarSD< 0, 1, 0b11011, "fcvtzs">;
2769 defm FCVTZU : SIMDTwoScalarSD< 1, 1, 0b11011, "fcvtzu">;
2770 defm FRECPE : SIMDTwoScalarSD< 0, 1, 0b11101, "frecpe">;
2771 defm FRECPX : SIMDTwoScalarSD< 0, 1, 0b11111, "frecpx">;
2772 defm FRSQRTE : SIMDTwoScalarSD< 1, 1, 0b11101, "frsqrte">;
2773 defm NEG : SIMDTwoScalarD< 1, 0b01011, "neg",
2774 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
2775 defm SCVTF : SIMDTwoScalarCVTSD< 0, 0, 0b11101, "scvtf", ARM64sitof>;
2776 defm SQABS : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_arm64_neon_sqabs>;
2777 defm SQNEG : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_arm64_neon_sqneg>;
2778 defm SQXTN : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_arm64_neon_scalar_sqxtn>;
2779 defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_arm64_neon_scalar_sqxtun>;
2780 defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
2781 int_arm64_neon_suqadd>;
2782 defm UCVTF : SIMDTwoScalarCVTSD< 1, 0, 0b11101, "ucvtf", ARM64uitof>;
2783 defm UQXTN : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_arm64_neon_scalar_uqxtn>;
2784 defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
2785 int_arm64_neon_usqadd>;
2787 def : Pat<(ARM64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;
2789 def : Pat<(v1i64 (int_arm64_neon_fcvtas (v1f64 FPR64:$Rn))),
2790 (FCVTASv1i64 FPR64:$Rn)>;
2791 def : Pat<(v1i64 (int_arm64_neon_fcvtau (v1f64 FPR64:$Rn))),
2792 (FCVTAUv1i64 FPR64:$Rn)>;
2793 def : Pat<(v1i64 (int_arm64_neon_fcvtms (v1f64 FPR64:$Rn))),
2794 (FCVTMSv1i64 FPR64:$Rn)>;
2795 def : Pat<(v1i64 (int_arm64_neon_fcvtmu (v1f64 FPR64:$Rn))),
2796 (FCVTMUv1i64 FPR64:$Rn)>;
2797 def : Pat<(v1i64 (int_arm64_neon_fcvtns (v1f64 FPR64:$Rn))),
2798 (FCVTNSv1i64 FPR64:$Rn)>;
2799 def : Pat<(v1i64 (int_arm64_neon_fcvtnu (v1f64 FPR64:$Rn))),
2800 (FCVTNUv1i64 FPR64:$Rn)>;
2801 def : Pat<(v1i64 (int_arm64_neon_fcvtps (v1f64 FPR64:$Rn))),
2802 (FCVTPSv1i64 FPR64:$Rn)>;
2803 def : Pat<(v1i64 (int_arm64_neon_fcvtpu (v1f64 FPR64:$Rn))),
2804 (FCVTPUv1i64 FPR64:$Rn)>;
2806 def : Pat<(f32 (int_arm64_neon_frecpe (f32 FPR32:$Rn))),
2807 (FRECPEv1i32 FPR32:$Rn)>;
2808 def : Pat<(f64 (int_arm64_neon_frecpe (f64 FPR64:$Rn))),
2809 (FRECPEv1i64 FPR64:$Rn)>;
2810 def : Pat<(v1f64 (int_arm64_neon_frecpe (v1f64 FPR64:$Rn))),
2811 (FRECPEv1i64 FPR64:$Rn)>;
2813 def : Pat<(f32 (int_arm64_neon_frecpx (f32 FPR32:$Rn))),
2814 (FRECPXv1i32 FPR32:$Rn)>;
2815 def : Pat<(f64 (int_arm64_neon_frecpx (f64 FPR64:$Rn))),
2816 (FRECPXv1i64 FPR64:$Rn)>;
2818 def : Pat<(f32 (int_arm64_neon_frsqrte (f32 FPR32:$Rn))),
2819 (FRSQRTEv1i32 FPR32:$Rn)>;
2820 def : Pat<(f64 (int_arm64_neon_frsqrte (f64 FPR64:$Rn))),
2821 (FRSQRTEv1i64 FPR64:$Rn)>;
2822 def : Pat<(v1f64 (int_arm64_neon_frsqrte (v1f64 FPR64:$Rn))),
2823 (FRSQRTEv1i64 FPR64:$Rn)>;
2825 // If an integer is about to be converted to a floating point value,
2826 // just load it on the floating point unit.
2827 // Here are the patterns for 8 and 16-bits to float.
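// As an illustration (register assignment assumed): for
//   (f32 (uint_to_fp (i32 (zextloadi8 addr))))
// the patterns below load the byte directly into a vector register and convert
// it there, roughly "ldr b0, [addr]" followed by "ucvtf s0, s0", avoiding a
// GPR load plus a GPR-to-FPR transfer.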
2829 def : Pat <(f32 (uint_to_fp (i32 (zextloadi8 ro_indexed8:$addr)))),
2830 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
2831 (LDRBro ro_indexed8:$addr), bsub))>;
2832 def : Pat <(f32 (uint_to_fp (i32 (zextloadi8 am_indexed8:$addr)))),
2833 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
2834 (LDRBui am_indexed8:$addr), bsub))>;
2835 def : Pat <(f32 (uint_to_fp (i32 (zextloadi8 am_unscaled8:$addr)))),
2836 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
2837 (LDURBi am_unscaled8:$addr), bsub))>;
2838 // 16-bits -> float.
2839 def : Pat <(f32 (uint_to_fp (i32 (zextloadi16 ro_indexed16:$addr)))),
2840 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
2841 (LDRHro ro_indexed16:$addr), hsub))>;
2842 def : Pat <(f32 (uint_to_fp (i32 (zextloadi16 am_indexed16:$addr)))),
2843 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
2844 (LDRHui am_indexed16:$addr), hsub))>;
2845 def : Pat <(f32 (uint_to_fp (i32 (zextloadi16 am_unscaled16:$addr)))),
2846 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
2847 (LDURHi am_unscaled16:$addr), hsub))>;
2848 // 32-bit integers are handled in the target-specific dag combine:
2849 // performIntToFpCombine.
2850 // Converting a 64-bit integer to a 32-bit floating point value is not
2851 // possible with UCVTF on the floating point registers (both source and
2852 // destination must have the same size).
2854 // Here are the patterns for 8, 16, 32, and 64-bits to double.
2855 // 8-bits -> double.
2856 def : Pat <(f64 (uint_to_fp (i32 (zextloadi8 ro_indexed8:$addr)))),
2857 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2858 (LDRBro ro_indexed8:$addr), bsub))>;
2859 def : Pat <(f64 (uint_to_fp (i32 (zextloadi8 am_indexed8:$addr)))),
2860 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2861 (LDRBui am_indexed8:$addr), bsub))>;
2862 def : Pat <(f64 (uint_to_fp (i32 (zextloadi8 am_unscaled8:$addr)))),
2863 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2864 (LDURBi am_unscaled8:$addr), bsub))>;
2865 // 16-bits -> double.
2866 def : Pat <(f64 (uint_to_fp (i32 (zextloadi16 ro_indexed16:$addr)))),
2867 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2868 (LDRHro ro_indexed16:$addr), hsub))>;
2869 def : Pat <(f64 (uint_to_fp (i32 (zextloadi16 am_indexed16:$addr)))),
2870 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2871 (LDRHui am_indexed16:$addr), hsub))>;
2872 def : Pat <(f64 (uint_to_fp (i32 (zextloadi16 am_unscaled16:$addr)))),
2873 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2874 (LDURHi am_unscaled16:$addr), hsub))>;
2875 // 32-bits -> double.
2876 def : Pat <(f64 (uint_to_fp (i32 (load ro_indexed32:$addr)))),
2877 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2878 (LDRSro ro_indexed32:$addr), ssub))>;
2879 def : Pat <(f64 (uint_to_fp (i32 (load am_indexed32:$addr)))),
2880 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2881 (LDRSui am_indexed32:$addr), ssub))>;
2882 def : Pat <(f64 (uint_to_fp (i32 (load am_unscaled32:$addr)))),
2883 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
2884 (LDURSi am_unscaled32:$addr), ssub))>;
2885 // 64-bits -> double are handled in target specific dag combine:
2886 // performIntToFpCombine.
2888 //===----------------------------------------------------------------------===//
2889 // Advanced SIMD three different-sized vector instructions.
2890 //===----------------------------------------------------------------------===//
2892 defm ADDHN : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_arm64_neon_addhn>;
2893 defm SUBHN : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_arm64_neon_subhn>;
2894 defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_arm64_neon_raddhn>;
2895 defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_arm64_neon_rsubhn>;
2896 defm PMULL : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_arm64_neon_pmull>;
2897 defm SABAL : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
2898 int_arm64_neon_sabd>;
2899 defm SABDL : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
2900 int_arm64_neon_sabd>;
2901 defm SADDL : SIMDLongThreeVectorBHS< 0, 0b0000, "saddl",
2902 BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
2903 defm SADDW : SIMDWideThreeVectorBHS< 0, 0b0001, "saddw",
2904 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
2905 defm SMLAL : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
2906 TriOpFrag<(add node:$LHS, (int_arm64_neon_smull node:$MHS, node:$RHS))>>;
2907 defm SMLSL : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
2908 TriOpFrag<(sub node:$LHS, (int_arm64_neon_smull node:$MHS, node:$RHS))>>;
2909 defm SMULL : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_arm64_neon_smull>;
2910 defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
2911 int_arm64_neon_sqadd>;
2912 defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
2913 int_arm64_neon_sqsub>;
2914 defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
2915 int_arm64_neon_sqdmull>;
2916 defm SSUBL : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
2917 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
2918 defm SSUBW : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
2919 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
2920 defm UABAL : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
2921 int_arm64_neon_uabd>;
2922 defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
2923 int_arm64_neon_uabd>;
2924 defm UADDL : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
2925 BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
2926 defm UADDW : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
2927 BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
2928 defm UMLAL : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
2929 TriOpFrag<(add node:$LHS, (int_arm64_neon_umull node:$MHS, node:$RHS))>>;
2930 defm UMLSL : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
2931 TriOpFrag<(sub node:$LHS, (int_arm64_neon_umull node:$MHS, node:$RHS))>>;
2932 defm UMULL : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_arm64_neon_umull>;
2933 defm USUBL : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
2934 BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
2935 defm USUBW : SIMDWideThreeVectorBHS< 1, 0b0011, "usubw",
2936 BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;
2938 // Patterns for 64-bit pmull
2939 def : Pat<(int_arm64_neon_pmull64 V64:$Rn, V64:$Rm),
2940 (PMULLv1i64 V64:$Rn, V64:$Rm)>;
2941 def : Pat<(int_arm64_neon_pmull64 (vector_extract (v2i64 V128:$Rn), (i64 1)),
2942 (vector_extract (v2i64 V128:$Rm), (i64 1))),
2943 (PMULLv2i64 V128:$Rn, V128:$Rm)>;
2945 // CodeGen patterns for addhn and subhn instructions, which can actually be
2946 // written in LLVM IR without too much difficulty.
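// As an illustration: for two v8i16 values a and b, the IR
//   trunc ((a + b) lshr 8) to v8i8
// is the addhn operation (add, keep the high half of each lane), and that is
// exactly the ARM64vlshr/trunc shape the patterns below match.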
2949 def : Pat<(v8i8 (trunc (v8i16 (ARM64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
2950 (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
2951 def : Pat<(v4i16 (trunc (v4i32 (ARM64vlshr (add V128:$Rn, V128:$Rm),
2952 (i32 16))))),
2953 (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
2954 def : Pat<(v2i32 (trunc (v2i64 (ARM64vlshr (add V128:$Rn, V128:$Rm),
2955 (i32 32))))),
2956 (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
2957 def : Pat<(concat_vectors (v8i8 V64:$Rd),
2958 (trunc (v8i16 (ARM64vlshr (add V128:$Rn, V128:$Rm),
2959 (i32 8))))),
2960 (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
2961 V128:$Rn, V128:$Rm)>;
2962 def : Pat<(concat_vectors (v4i16 V64:$Rd),
2963 (trunc (v4i32 (ARM64vlshr (add V128:$Rn, V128:$Rm),
2964 (i32 16))))),
2965 (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
2966 V128:$Rn, V128:$Rm)>;
2967 def : Pat<(concat_vectors (v2i32 V64:$Rd),
2968 (trunc (v2i64 (ARM64vlshr (add V128:$Rn, V128:$Rm),
2969 (i32 32))))),
2970 (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
2971 V128:$Rn, V128:$Rm)>;
2974 def : Pat<(v8i8 (trunc (v8i16 (ARM64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
2975 (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
2976 def : Pat<(v4i16 (trunc (v4i32 (ARM64vlshr (sub V128:$Rn, V128:$Rm),
2977 (i32 16))))),
2978 (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
2979 def : Pat<(v2i32 (trunc (v2i64 (ARM64vlshr (sub V128:$Rn, V128:$Rm),
2980 (i32 32))))),
2981 (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
2982 def : Pat<(concat_vectors (v8i8 V64:$Rd),
2983 (trunc (v8i16 (ARM64vlshr (sub V128:$Rn, V128:$Rm),
2984 (i32 8))))),
2985 (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
2986 V128:$Rn, V128:$Rm)>;
2987 def : Pat<(concat_vectors (v4i16 V64:$Rd),
2988 (trunc (v4i32 (ARM64vlshr (sub V128:$Rn, V128:$Rm),
2989 (i32 16))))),
2990 (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
2991 V128:$Rn, V128:$Rm)>;
2992 def : Pat<(concat_vectors (v2i32 V64:$Rd),
2993 (trunc (v2i64 (ARM64vlshr (sub V128:$Rn, V128:$Rm),
2994 (i32 32))))),
2995 (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
2996 V128:$Rn, V128:$Rm)>;
2998 //----------------------------------------------------------------------------
2999 // AdvSIMD bitwise extract from vector instruction.
3000 //----------------------------------------------------------------------------
3002 defm EXT : SIMDBitwiseExtract<"ext">;
3004 def : Pat<(v4i16 (ARM64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3005 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3006 def : Pat<(v8i16 (ARM64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3007 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3008 def : Pat<(v2i32 (ARM64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3009 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3010 def : Pat<(v2f32 (ARM64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3011 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3012 def : Pat<(v4i32 (ARM64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3013 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3014 def : Pat<(v4f32 (ARM64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3015 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3016 def : Pat<(v2i64 (ARM64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3017 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3018 def : Pat<(v2f64 (ARM64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3019 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3021 // We use EXT to handle extract_subvector to copy the upper 64-bits of a
3022 // 128-bit vector.
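// As an illustration (assembly syntax assumed): the first pattern below turns
//   (v8i8 (extract_subvector V128:$Rn, (i64 8)))
// into an EXT of the register with itself by 8 bytes, which rotates the upper
// half into the low 64 bits, followed by taking the D subregister; roughly
//   ext v0.16b, v1.16b, v1.16b, #8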
3023 def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 8))),
3024 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3025 def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 4))),
3026 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3027 def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 2))),
3028 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3029 def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 1))),
3030 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3031 def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 2))),
3032 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3033 def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 1))),
3034 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3037 //----------------------------------------------------------------------------
3038 // AdvSIMD zip vector
3039 //----------------------------------------------------------------------------
3041 defm TRN1 : SIMDZipVector<0b010, "trn1", ARM64trn1>;
3042 defm TRN2 : SIMDZipVector<0b110, "trn2", ARM64trn2>;
3043 defm UZP1 : SIMDZipVector<0b001, "uzp1", ARM64uzp1>;
3044 defm UZP2 : SIMDZipVector<0b101, "uzp2", ARM64uzp2>;
3045 defm ZIP1 : SIMDZipVector<0b011, "zip1", ARM64zip1>;
3046 defm ZIP2 : SIMDZipVector<0b111, "zip2", ARM64zip2>;
3048 //----------------------------------------------------------------------------
3049 // AdvSIMD TBL/TBX instructions
3050 //----------------------------------------------------------------------------
3052 defm TBL : SIMDTableLookup< 0, "tbl">;
3053 defm TBX : SIMDTableLookupTied<1, "tbx">;
3055 def : Pat<(v8i8 (int_arm64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
3056 (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
3057 def : Pat<(v16i8 (int_arm64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
3058 (TBLv16i8One V128:$Ri, V128:$Rn)>;
3060 def : Pat<(v8i8 (int_arm64_neon_tbx1 (v8i8 V64:$Rd),
3061 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
3062 (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
3063 def : Pat<(v16i8 (int_arm64_neon_tbx1 (v16i8 V128:$Rd),
3064 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
3065 (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
3068 //----------------------------------------------------------------------------
3069 // AdvSIMD scalar CPY instruction
3070 //----------------------------------------------------------------------------
3072 defm CPY : SIMDScalarCPY<"cpy">;
3074 //----------------------------------------------------------------------------
3075 // AdvSIMD scalar pairwise instructions
3076 //----------------------------------------------------------------------------
3078 defm ADDP : SIMDPairwiseScalarD<0, 0b11011, "addp">;
3079 defm FADDP : SIMDPairwiseScalarSD<1, 0, 0b01101, "faddp">;
3080 defm FMAXNMP : SIMDPairwiseScalarSD<1, 0, 0b01100, "fmaxnmp">;
3081 defm FMAXP : SIMDPairwiseScalarSD<1, 0, 0b01111, "fmaxp">;
3082 defm FMINNMP : SIMDPairwiseScalarSD<1, 1, 0b01100, "fminnmp">;
3083 defm FMINP : SIMDPairwiseScalarSD<1, 1, 0b01111, "fminp">;
3084 def : Pat<(i64 (int_arm64_neon_saddv (v2i64 V128:$Rn))),
3085 (ADDPv2i64p V128:$Rn)>;
3086 def : Pat<(i64 (int_arm64_neon_uaddv (v2i64 V128:$Rn))),
3087 (ADDPv2i64p V128:$Rn)>;
3088 def : Pat<(f32 (int_arm64_neon_faddv (v2f32 V64:$Rn))),
3089 (FADDPv2i32p V64:$Rn)>;
3090 def : Pat<(f32 (int_arm64_neon_faddv (v4f32 V128:$Rn))),
3091 (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
3092 def : Pat<(f64 (int_arm64_neon_faddv (v2f64 V128:$Rn))),
3093 (FADDPv2i64p V128:$Rn)>;
3094 def : Pat<(f32 (int_arm64_neon_fmaxnmv (v2f32 V64:$Rn))),
3095 (FMAXNMPv2i32p V64:$Rn)>;
3096 def : Pat<(f64 (int_arm64_neon_fmaxnmv (v2f64 V128:$Rn))),
3097 (FMAXNMPv2i64p V128:$Rn)>;
3098 def : Pat<(f32 (int_arm64_neon_fmaxv (v2f32 V64:$Rn))),
3099 (FMAXPv2i32p V64:$Rn)>;
3100 def : Pat<(f64 (int_arm64_neon_fmaxv (v2f64 V128:$Rn))),
3101 (FMAXPv2i64p V128:$Rn)>;
3102 def : Pat<(f32 (int_arm64_neon_fminnmv (v2f32 V64:$Rn))),
3103 (FMINNMPv2i32p V64:$Rn)>;
3104 def : Pat<(f64 (int_arm64_neon_fminnmv (v2f64 V128:$Rn))),
3105 (FMINNMPv2i64p V128:$Rn)>;
3106 def : Pat<(f32 (int_arm64_neon_fminv (v2f32 V64:$Rn))),
3107 (FMINPv2i32p V64:$Rn)>;
3108 def : Pat<(f64 (int_arm64_neon_fminv (v2f64 V128:$Rn))),
3109 (FMINPv2i64p V128:$Rn)>;
3111 //----------------------------------------------------------------------------
3112 // AdvSIMD INS/DUP instructions
3113 //----------------------------------------------------------------------------
3115 def DUPv8i8gpr : SIMDDupFromMain<0, 0b00001, ".8b", v8i8, V64, GPR32>;
3116 def DUPv16i8gpr : SIMDDupFromMain<1, 0b00001, ".16b", v16i8, V128, GPR32>;
3117 def DUPv4i16gpr : SIMDDupFromMain<0, 0b00010, ".4h", v4i16, V64, GPR32>;
3118 def DUPv8i16gpr : SIMDDupFromMain<1, 0b00010, ".8h", v8i16, V128, GPR32>;
3119 def DUPv2i32gpr : SIMDDupFromMain<0, 0b00100, ".2s", v2i32, V64, GPR32>;
3120 def DUPv4i32gpr : SIMDDupFromMain<1, 0b00100, ".4s", v4i32, V128, GPR32>;
3121 def DUPv2i64gpr : SIMDDupFromMain<1, 0b01000, ".2d", v2i64, V128, GPR64>;
3123 def DUPv2i64lane : SIMDDup64FromElement;
3124 def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
3125 def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
3126 def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
3127 def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
3128 def DUPv8i8lane : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
3129 def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
3131 def : Pat<(v2f32 (ARM64dup (f32 FPR32:$Rn))),
3132 (v2f32 (DUPv2i32lane
3133 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
3134 (i64 0)))>;
3135 def : Pat<(v4f32 (ARM64dup (f32 FPR32:$Rn))),
3136 (v4f32 (DUPv4i32lane
3137 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
3138 (i64 0)))>;
3139 def : Pat<(v2f64 (ARM64dup (f64 FPR64:$Rn))),
3140 (v2f64 (DUPv2i64lane
3141 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
3142 (i64 0)))>;
3144 def : Pat<(v2f32 (ARM64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
3145 (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
3146 def : Pat<(v4f32 (ARM64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
3147 (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
3148 def : Pat<(v2f64 (ARM64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
3149 (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
3151 // If there's an (ARM64dup (vector_extract ...) ...), we can use a duplane
3152 // instruction even if the types don't match: we just have to remap the lane
3153 // carefully. N.b. this trick only applies to truncations.
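// As an illustration (byte ordering of the lane remap assumed): duplicating
// the truncated i16 element n of a v8i16 is done as a byte dup from lane 2*n
// via VecIndex_x2 and DUPv8i8lane, so no explicit truncate or widen is needed.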
3154 def VecIndex_x2 : SDNodeXForm<imm, [{
3155 return CurDAG->getTargetConstant(2 * N->getZExtValue(), MVT::i64);
3156 }]>;
3157 def VecIndex_x4 : SDNodeXForm<imm, [{
3158 return CurDAG->getTargetConstant(4 * N->getZExtValue(), MVT::i64);
3159 }]>;
3160 def VecIndex_x8 : SDNodeXForm<imm, [{
3161 return CurDAG->getTargetConstant(8 * N->getZExtValue(), MVT::i64);
3162 }]>;
3164 multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
3165 ValueType Src128VT, ValueType ScalVT,
3166 Instruction DUP, SDNodeXForm IdxXFORM> {
3167 def : Pat<(ResVT (ARM64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
3168 imm:$idx)))),
3169 (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
3171 def : Pat<(ResVT (ARM64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
3172 imm:$idx)))),
3173 (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
3174 }
3176 defm : DUPWithTruncPats<v8i8, v4i16, v8i16, i32, DUPv8i8lane, VecIndex_x2>;
3177 defm : DUPWithTruncPats<v8i8, v2i32, v4i32, i32, DUPv8i8lane, VecIndex_x4>;
3178 defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
3180 defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
3181 defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
3182 defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
3184 multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
3185 SDNodeXForm IdxXFORM> {
3186 def : Pat<(ResVT (ARM64dup (i32 (trunc (vector_extract (v2i64 V128:$Rn),
3187 imm:$idx))))),
3188 (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
3190 def : Pat<(ResVT (ARM64dup (i32 (trunc (vector_extract (v1i64 V64:$Rn),
3191 imm:$idx))))),
3192 (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
3193 }
3195 defm : DUPWithTrunci64Pats<v8i8, DUPv8i8lane, VecIndex_x8>;
3196 defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
3197 defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;
3199 defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
3200 defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
3201 defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
3203 // SMOV and UMOV definitions, with some extra patterns for convenience
3205 defm SMOV : SMov;
3206 defm UMOV : UMov;
3207 def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
3208 (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
3209 def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
3210 (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
3211 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3212 (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
3213 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3214 (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
3215 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3216 (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
3217 def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
3218 (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
3220 // Extracting i8 or i16 elements will have the zero-extend transformed to
3221 // an 'and' mask by type legalization since neither i8 nor i16 are legal types
3222 // for ARM64. Match these patterns here since UMOV already zeroes out the high
3223 // bits of the destination register.
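// As an illustration (assembly syntax assumed):
//   (and (vector_extract (v16i8 V128:$Rn), idx), 0xff)
// is what a zero-extended byte extract looks like after legalization; the
// pattern below selects a single UMOVvi8, roughly "umov w0, v0.b[idx]", with
// no separate masking instruction.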
3224 def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
3225 (i32 0xff)),
3226 (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
3227 def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
3228 (i32 0xffff)),
3229 (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
3231 defm INS : SIMDIns;
3233 def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
3234 (SUBREG_TO_REG (i32 0),
3235 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3236 def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
3237 (SUBREG_TO_REG (i32 0),
3238 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3240 def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
3241 (SUBREG_TO_REG (i32 0),
3242 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3243 def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
3244 (SUBREG_TO_REG (i32 0),
3245 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3247 def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
3248 (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
3249 (i32 FPR32:$Rn), ssub))>;
3250 def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
3251 (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
3252 (i32 FPR32:$Rn), ssub))>;
3253 def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
3254 (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
3255 (i64 FPR64:$Rn), dsub))>;
3257 def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
3258 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
3259 def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
3260 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
3261 def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
3262 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
3264 def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
3265 (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
3266 (EXTRACT_SUBREG
3267 (INSvi32lane
3268 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
3269 VectorIndexS:$imm,
3270 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
3271 (i64 0)),
3272 dsub)>;
3273 def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
3274 (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
3275 (INSvi32lane
3276 V128:$Rn, VectorIndexS:$imm,
3277 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
3278 (i64 0))>;
3279 def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
3280 (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
3281 (INSvi64lane
3282 V128:$Rn, VectorIndexD:$imm,
3283 (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
3284 (i64 0))>;
3286 // Copy an element at a constant index in one vector into a constant indexed
3287 // element of another.
3288 // FIXME refactor to a shared class/def parameterized on vector type, vector
3289 // index type and INS extension
3290 def : Pat<(v16i8 (int_arm64_neon_vcopy_lane
3291 (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
3292 VectorIndexB:$idx2)),
3293 (v16i8 (INSvi8lane
3294 V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
3295 )>;
3296 def : Pat<(v8i16 (int_arm64_neon_vcopy_lane
3297 (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
3298 VectorIndexH:$idx2)),
3299 (v8i16 (INSvi16lane
3300 V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
3301 )>;
3302 def : Pat<(v4i32 (int_arm64_neon_vcopy_lane
3303 (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
3304 VectorIndexS:$idx2)),
3305 (v4i32 (INSvi32lane
3306 V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
3307 )>;
3308 def : Pat<(v2i64 (int_arm64_neon_vcopy_lane
3309 (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
3310 VectorIndexD:$idx2)),
3311 (v2i64 (INSvi64lane
3312 V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
3313 )>;
3315 multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
3316 ValueType VTScal, Instruction INS> {
3317 def : Pat<(VT128 (vector_insert V128:$src,
3318 (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
3319 imm:$Immd)),
3320 (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;
3322 def : Pat<(VT128 (vector_insert V128:$src,
3323 (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
3324 imm:$Immd)),
3325 (INS V128:$src, imm:$Immd,
3326 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
3328 def : Pat<(VT64 (vector_insert V64:$src,
3329 (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
3330 imm:$Immd)),
3331 (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
3332 imm:$Immd, V128:$Rn, imm:$Immn),
3333 dsub)>;
3335 def : Pat<(VT64 (vector_insert V64:$src,
3336 (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
3337 imm:$Immd)),
3338 (EXTRACT_SUBREG
3339 (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
3340 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
3341 dsub)>;
3342 }
3344 defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
3345 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
3346 defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, INSvi8lane>;
3347 defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, INSvi16lane>;
3348 defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, INSvi32lane>;
3349 defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, INSvi64lane>;
3352 // Floating point vector extractions are codegen'd as a sequence of
3353 // subregister extractions, possibly fed by an INS if the lane number is
3354 // anything other than zero.
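// As an illustration: extracting lane 0 of a v4f32 held in q1 is just a use
// of s1 (a plain subregister copy); only a non-zero lane needs the
// INSvi32lane-into-IMPLICIT_DEF sequence used below.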
3355 def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
3356 (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
3357 def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
3358 (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
3359 def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
3360 (f64 (EXTRACT_SUBREG
3361 (INSvi64lane (v2f64 (IMPLICIT_DEF)), 0,
3362 V128:$Rn, VectorIndexD:$idx),
3363 dsub))>;
3364 def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
3365 (f32 (EXTRACT_SUBREG
3366 (INSvi32lane (v4f32 (IMPLICIT_DEF)), 0,
3367 V128:$Rn, VectorIndexS:$idx),
3368 ssub))>;
3370 // All concat_vectors operations are canonicalised to act on i64 vectors for
3371 // ARM64. In the general case we need an instruction, which had just as well be
3372 // INS.
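// As an illustration (assembly syntax assumed): (v4i32 (concat_vectors
// V64:$Rd, V64:$Rn)) places $Rd in the low half of an undefined 128-bit
// register and inserts element 0 of $Rn into d-lane 1, i.e. a single
// "ins v0.d[1], v1.d[0]".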
3373 class ConcatPat<ValueType DstTy, ValueType SrcTy>
3374 : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
3375 (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
3376 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
3378 def : ConcatPat<v2i64, v1i64>;
3379 def : ConcatPat<v2f64, v1f64>;
3380 def : ConcatPat<v4i32, v2i32>;
3381 def : ConcatPat<v4f32, v2f32>;
3382 def : ConcatPat<v8i16, v4i16>;
3383 def : ConcatPat<v16i8, v8i8>;
3385 // If the high lanes are undef, though, we can just ignore them:
3386 class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
3387 : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
3388 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
3390 def : ConcatUndefPat<v2i64, v1i64>;
3391 def : ConcatUndefPat<v2f64, v1f64>;
3392 def : ConcatUndefPat<v4i32, v2i32>;
3393 def : ConcatUndefPat<v4f32, v2f32>;
3394 def : ConcatUndefPat<v8i16, v4i16>;
3395 def : ConcatUndefPat<v16i8, v8i8>;
3397 //----------------------------------------------------------------------------
3398 // AdvSIMD across lanes instructions
3399 //----------------------------------------------------------------------------
3401 defm ADDV : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
3402 defm SMAXV : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
3403 defm SMINV : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
3404 defm UMAXV : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
3405 defm UMINV : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
3406 defm SADDLV : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
3407 defm UADDLV : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
3408 defm FMAXNMV : SIMDAcrossLanesS<0b01100, 0, "fmaxnmv", int_arm64_neon_fmaxnmv>;
3409 defm FMAXV : SIMDAcrossLanesS<0b01111, 0, "fmaxv", int_arm64_neon_fmaxv>;
3410 defm FMINNMV : SIMDAcrossLanesS<0b01100, 1, "fminnmv", int_arm64_neon_fminnmv>;
3411 defm FMINV : SIMDAcrossLanesS<0b01111, 1, "fminv", int_arm64_neon_fminv>;
3413 multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc, Intrinsic intOp> {
3414 // If there is a sign extension after this intrinsic, consume it as smov already
3415 // performs it
3416 def : Pat<(i32 (sext_inreg (i32 (intOp (v8i8 V64:$Rn))), i8)),
3417 (i32 (SMOVvi8to32
3418 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3419 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
3420 (i64 0)))>;
3421 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
3422 (i32 (SMOVvi8to32
3423 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3424 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
3425 (i64 0)))>;
3426 // If there is a sign extension after this intrinsic, consume it as smov already
3427 // performs it
3428 def : Pat<(i32 (sext_inreg (i32 (intOp (v16i8 V128:$Rn))), i8)),
3429 (i32 (SMOVvi8to32
3430 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3431 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
3432 (i64 0)))>;
3433 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
3434 (i32 (SMOVvi8to32
3435 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3436 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
3437 (i64 0)))>;
3438 // If there is a sign extension after this intrinsic, consume it as smov already
3439 // performs it
3440 def : Pat<(i32 (sext_inreg (i32 (intOp (v4i16 V64:$Rn))), i16)),
3441 (i32 (SMOVvi16to32
3442 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3443 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
3444 (i64 0)))>;
3445 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
3446 (i32 (SMOVvi16to32
3447 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3448 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
3449 (i64 0)))>;
3450 // If there is a sign extension after this intrinsic, consume it as smov already
3451 // performs it
3452 def : Pat<(i32 (sext_inreg (i32 (intOp (v8i16 V128:$Rn))), i16)),
3453 (i32 (SMOVvi16to32
3454 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3455 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
3456 (i64 0)))>;
3457 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
3458 (i32 (SMOVvi16to32
3459 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3460 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
3461 (i64 0)))>;
3463 def : Pat<(i32 (intOp (v4i32 V128:$Rn))),
3464 (i32 (EXTRACT_SUBREG
3465 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3466 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub),
3467 ssub))>;
3468 }
3470 multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc, Intrinsic intOp> {
3471 // If there is a masking operation keeping only what has been actually
3472 // generated, consume it.
3473 def : Pat<(i32 (and (i32 (intOp (v8i8 V64:$Rn))), maski8_or_more)),
3474 (i32 (EXTRACT_SUBREG
3475 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3476 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
3477 ssub))>;
3478 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
3479 (i32 (EXTRACT_SUBREG
3480 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3481 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
3482 ssub))>;
3483 // If there is a masking operation keeping only what has been actually
3484 // generated, consume it.
3485 def : Pat<(i32 (and (i32 (intOp (v16i8 V128:$Rn))), maski8_or_more)),
3486 (i32 (EXTRACT_SUBREG
3487 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3488 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
3489 ssub))>;
3490 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
3491 (i32 (EXTRACT_SUBREG
3492 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3493 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
3494 ssub))>;
3496 // If there is a masking operation keeping only what has been actually
3497 // generated, consume it.
3498 def : Pat<(i32 (and (i32 (intOp (v4i16 V64:$Rn))), maski16_or_more)),
3499 (i32 (EXTRACT_SUBREG
3500 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3501 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
3502 ssub))>;
3503 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
3504 (i32 (EXTRACT_SUBREG
3505 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3506 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
3507 ssub))>;
3508 // If there is a masking operation keeping only what has been actually
3509 // generated, consume it.
3510 def : Pat<(i32 (and (i32 (intOp (v8i16 V128:$Rn))), maski16_or_more)),
3511 (i32 (EXTRACT_SUBREG
3512 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3513 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
3514 ssub))>;
3515 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
3516 (i32 (EXTRACT_SUBREG
3517 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3518 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
3519 ssub))>;
3521 def : Pat<(i32 (intOp (v4i32 V128:$Rn))),
3522 (i32 (EXTRACT_SUBREG
3523 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3524 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub),
3525 ssub))>;
3526 }
3529 multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
3530 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
3531 (i32 (SMOVvi16to32
3532 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3533 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
3534 (i64 0)))>;
3535 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
3536 (i32 (SMOVvi16to32
3537 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3538 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
3539 (i64 0)))>;
3541 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
3542 (i32 (EXTRACT_SUBREG
3543 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3544 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
3545 ssub))>;
3546 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
3547 (i32 (EXTRACT_SUBREG
3548 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3549 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
3550 ssub))>;
3552 def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
3553 (i64 (EXTRACT_SUBREG
3554 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3555 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
3556 dsub))>;
3557 }
3559 multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
3560 Intrinsic intOp> {
3561 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
3562 (i32 (EXTRACT_SUBREG
3563 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3564 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
3565 ssub))>;
3566 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
3567 (i32 (EXTRACT_SUBREG
3568 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3569 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
3570 ssub))>;
3572 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
3573 (i32 (EXTRACT_SUBREG
3574 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3575 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
3576 ssub))>;
3577 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
3578 (i32 (EXTRACT_SUBREG
3579 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3580 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
3581 ssub))>;
3583 def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
3584 (i64 (EXTRACT_SUBREG
3585 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3586 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
3587 dsub))>;
3588 }
3590 defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", int_arm64_neon_saddv>;
3591 // vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
3592 def : Pat<(i32 (int_arm64_neon_saddv (v2i32 V64:$Rn))),
3593 (EXTRACT_SUBREG (ADDPv2i32 V64:$Rn, V64:$Rn), ssub)>;
3595 defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", int_arm64_neon_uaddv>;
3596 // vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
3597 def : Pat<(i32 (int_arm64_neon_uaddv (v2i32 V64:$Rn))),
3598 (EXTRACT_SUBREG (ADDPv2i32 V64:$Rn, V64:$Rn), ssub)>;
3600 defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", int_arm64_neon_smaxv>;
3601 def : Pat<(i32 (int_arm64_neon_smaxv (v2i32 V64:$Rn))),
3602 (EXTRACT_SUBREG (SMAXPv2i32 V64:$Rn, V64:$Rn), ssub)>;
3604 defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", int_arm64_neon_sminv>;
3605 def : Pat<(i32 (int_arm64_neon_sminv (v2i32 V64:$Rn))),
3606 (EXTRACT_SUBREG (SMINPv2i32 V64:$Rn, V64:$Rn), ssub)>;
3608 defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", int_arm64_neon_umaxv>;
3609 def : Pat<(i32 (int_arm64_neon_umaxv (v2i32 V64:$Rn))),
3610 (EXTRACT_SUBREG (UMAXPv2i32 V64:$Rn, V64:$Rn), ssub)>;
3612 defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", int_arm64_neon_uminv>;
3613 def : Pat<(i32 (int_arm64_neon_uminv (v2i32 V64:$Rn))),
3614 (EXTRACT_SUBREG (UMINPv2i32 V64:$Rn, V64:$Rn), ssub)>;
3616 defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_arm64_neon_saddlv>;
3617 defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_arm64_neon_uaddlv>;
3619 // The vaddlv_s32 intrinsic gets mapped to SADDLP.
3620 def : Pat<(i64 (int_arm64_neon_saddlv (v2i32 V64:$Rn))),
3621 (i64 (EXTRACT_SUBREG
3622 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3623 (SADDLPv2i32_v1i64 V64:$Rn), dsub),
3625 // The vaddlv_u32 intrinsic gets mapped to UADDLP.
3626 def : Pat<(i64 (int_arm64_neon_uaddlv (v2i32 V64:$Rn))),
3627 (i64 (EXTRACT_SUBREG
3628 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
3629 (UADDLPv2i32_v1i64 V64:$Rn), dsub),
3632 //------------------------------------------------------------------------------
3633 // AdvSIMD modified immediate instructions
3634 //------------------------------------------------------------------------------
3637 defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", ARM64bici>;
3639 defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", ARM64orri>;
3643 def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0b1111, V128, fpimm8,
3645 [(set (v2f64 V128:$Rd), (ARM64fmov imm0_255:$imm8))]>;
3646 def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0b1111, V64, fpimm8,
3648 [(set (v2f32 V64:$Rd), (ARM64fmov imm0_255:$imm8))]>;
3649 def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0b1111, V128, fpimm8,
3651 [(set (v4f32 V128:$Rd), (ARM64fmov imm0_255:$imm8))]>;
3655 // EDIT byte mask: scalar
3656 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
3657 def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
3658 [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
3659 // The movi_edit node has the immediate value already encoded, so we use
3660 // a plain imm0_255 here.
3661 def : Pat<(f64 (ARM64movi_edit imm0_255:$shift)),
3662 (MOVID imm0_255:$shift)>;
3664 def : Pat<(v1i64 immAllZerosV), (MOVID (i32 0))>;
3665 def : Pat<(v2i32 immAllZerosV), (MOVID (i32 0))>;
3666 def : Pat<(v4i16 immAllZerosV), (MOVID (i32 0))>;
3667 def : Pat<(v8i8 immAllZerosV), (MOVID (i32 0))>;
3669 def : Pat<(v1i64 immAllOnesV), (MOVID (i32 255))>;
3670 def : Pat<(v2i32 immAllOnesV), (MOVID (i32 255))>;
3671 def : Pat<(v4i16 immAllOnesV), (MOVID (i32 255))>;
3672 def : Pat<(v8i8 immAllOnesV), (MOVID (i32 255))>;
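// (In the byte-mask encoding each bit of imm8 is replicated into a full byte
// of the result, so imm8 = 0 materializes all-zeros and imm8 = 255 all-ones
// across the 64-bit register.)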
3674 // EDIT byte mask: 2d
3676 // The movi_edit node has the immediate value already encoded, so we use
3677 // a plain imm0_255 in the pattern
3678 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
3679 def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0b1110, V128,
3682 [(set (v2i64 V128:$Rd), (ARM64movi_edit imm0_255:$imm8))]>;
3685 // Use movi.2d to materialize 0.0 if the HW does zero-cycle zeroing.
3686 // Complexity is added to break a tie with a plain MOVI.
3687 let AddedComplexity = 1 in {
3688 def : Pat<(f32 fpimm0),
3689 (f32 (EXTRACT_SUBREG (v2i64 (MOVIv2d_ns (i32 0))), ssub))>,
3691 def : Pat<(f64 fpimm0),
3692 (f64 (EXTRACT_SUBREG (v2i64 (MOVIv2d_ns (i32 0))), dsub))>,
3696 def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
3697 def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
3698 def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
3699 def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
3701 def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
3702 def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
3703 def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
3704 def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
3706 def : Pat<(v2f64 (ARM64dup (f64 fpimm0))), (MOVIv2d_ns (i32 0))>;
3707 def : Pat<(v4f32 (ARM64dup (f32 fpimm0))), (MOVIv2d_ns (i32 0))>;
3709 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
3710 defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
3711 def : Pat<(v2i32 (ARM64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
3712 (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
3713 def : Pat<(v4i32 (ARM64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
3714 (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
3715 def : Pat<(v4i16 (ARM64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
3716 (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
3717 def : Pat<(v8i16 (ARM64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
3718 (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
3720 // EDIT per word: 2s & 4s with MSL shifter
3721 def MOVIv2s_msl : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
3722 [(set (v2i32 V64:$Rd),
3723 (ARM64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
3724 def MOVIv4s_msl : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
3725 [(set (v4i32 V128:$Rd),
3726 (ARM64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
3728 // Per byte: 8b & 16b
3729 def MOVIv8b_ns : SIMDModifiedImmVectorNoShift<0, 0, 0b1110, V64, imm0_255,
3731 [(set (v8i8 V64:$Rd), (ARM64movi imm0_255:$imm8))]>;
3732 def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0b1110, V128, imm0_255,
3734 [(set (v16i8 V128:$Rd), (ARM64movi imm0_255:$imm8))]>;
3738 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
3739 defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
3740 def : Pat<(v2i32 (ARM64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
3741 (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
3742 def : Pat<(v4i32 (ARM64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
3743 (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
3744 def : Pat<(v4i16 (ARM64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
3745 (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
3746 def : Pat<(v8i16 (ARM64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
3747 (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
3749 // EDIT per word: 2s & 4s with MSL shifter
3750 def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
3751 [(set (v2i32 V64:$Rd),
3752 (ARM64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
3753 def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
3754 [(set (v4i32 V128:$Rd),
3755 (ARM64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
3757 //----------------------------------------------------------------------------
3758 // AdvSIMD indexed element
3759 //----------------------------------------------------------------------------
3761 let neverHasSideEffects = 1 in {
3762 defm FMLA : SIMDFPIndexedSDTied<0, 0b0001, "fmla">;
3763 defm FMLS : SIMDFPIndexedSDTied<0, 0b0101, "fmls">;
3766 // NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
3767 // instruction expects the addend first, while the intrinsic expects it last.
3769 // On the other hand, there are quite a few valid combinatorial options due to
3770 // the commutativity of multiplication and the fact that (-x) * y = x * (-y).
3771 defm : SIMDFPIndexedSDTiedPatterns<"FMLA",
3772 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
3773 defm : SIMDFPIndexedSDTiedPatterns<"FMLA",
3774 TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;
3776 defm : SIMDFPIndexedSDTiedPatterns<"FMLS",
3777 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
3778 defm : SIMDFPIndexedSDTiedPatterns<"FMLS",
3779 TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
3780 defm : SIMDFPIndexedSDTiedPatterns<"FMLS",
3781 TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
3782 defm : SIMDFPIndexedSDTiedPatterns<"FMLS",
3783 TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
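// As a quick sanity check of why several orderings appear above: with $LHS as
// the addend,
//   fma(-a, b, c) == fma(a, -b, c) == c - a*b
// so FMLS needs a variant for every placement of the fneg, while FMLA only
// needs the two operand orders of the multiply.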
3785 multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
// 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit,
// and DUP from a scalar.
3788 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
3789 (ARM64duplane32 (v4f32 (fneg V128:$Rm)),
3790 VectorIndexS:$idx))),
3791 (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
3792 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
3793 (v2f32 (ARM64duplane32
3794 (v4f32 (insert_subvector undef,
3795 (v2f32 (fneg V64:$Rm)),
3797 VectorIndexS:$idx)))),
3798 (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
3799 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
3800 VectorIndexS:$idx)>;
3801 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
3802 (ARM64dup (f32 (fneg FPR32Op:$Rm))))),
3803 (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
3804 (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
// 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit,
// and DUP from a scalar.
3808 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
3809 (ARM64duplane32 (v4f32 (fneg V128:$Rm)),
3810 VectorIndexS:$idx))),
3811 (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
3812 VectorIndexS:$idx)>;
3813 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
3814 (v4f32 (ARM64duplane32
3815 (v4f32 (insert_subvector undef,
3816 (v2f32 (fneg V64:$Rm)),
3818 VectorIndexS:$idx)))),
3819 (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
3820 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
3821 VectorIndexS:$idx)>;
3822 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
3823 (ARM64dup (f32 (fneg FPR32Op:$Rm))))),
3824 (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
3825 (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
3827 // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
3828 // (DUPLANE from 64-bit would be trivial).
3829 def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
3830 (ARM64duplane64 (v2f64 (fneg V128:$Rm)),
3831 VectorIndexD:$idx))),
            (FMLSv2i64_indexed
                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
3834 def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
3835 (ARM64dup (f64 (fneg FPR64Op:$Rm))))),
3836 (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
3837 (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
3839 // 2 variants for 32-bit scalar version: extract from .2s or from .4s
3840 def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
3841 (vector_extract (v4f32 (fneg V128:$Rm)),
3842 VectorIndexS:$idx))),
3843 (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
3844 V128:$Rm, VectorIndexS:$idx)>;
3845 def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
3846 (vector_extract (v2f32 (fneg V64:$Rm)),
3847 VectorIndexS:$idx))),
3848 (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
3849 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
3851 // 1 variant for 64-bit scalar version: extract from .1d or from .2d
3852 def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
3853 (vector_extract (v2f64 (fneg V128:$Rm)),
3854 VectorIndexS:$idx))),
3855 (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
3856 V128:$Rm, VectorIndexS:$idx)>;
3859 defm : FMLSIndexedAfterNegPatterns<
3860 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
3861 defm : FMLSIndexedAfterNegPatterns<
3862 TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;
3864 defm FMULX : SIMDFPIndexedSD<1, 0b1001, "fmulx", int_arm64_neon_fmulx>;
3865 defm FMUL : SIMDFPIndexedSD<0, 0b1001, "fmul", fmul>;
def : Pat<(v2f32 (fmul V64:$Rn, (ARM64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (ARM64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (ARM64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;
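// A multiply by a dup'd scalar is thus folded to the by-element form: the
// scalar is moved into the low lane of a vector register and lane 0 is used
// as the index.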
3880 defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_arm64_neon_sqdmulh>;
3881 defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_arm64_neon_sqrdmulh>;
3882 defm MLA : SIMDVectorIndexedHSTied<1, 0b0000, "mla",
3883 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))>>;
3884 defm MLS : SIMDVectorIndexedHSTied<1, 0b0100, "mls",
3885 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))>>;
3886 defm MUL : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
3887 defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
3888 TriOpFrag<(add node:$LHS, (int_arm64_neon_smull node:$MHS, node:$RHS))>>;
3889 defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
3890 TriOpFrag<(sub node:$LHS, (int_arm64_neon_smull node:$MHS, node:$RHS))>>;
3891 defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
3892 int_arm64_neon_smull>;
3893 defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
3894 int_arm64_neon_sqadd>;
3895 defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
3896 int_arm64_neon_sqsub>;
3897 defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_arm64_neon_sqdmull>;
3898 defm UMLAL : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
3899 TriOpFrag<(add node:$LHS, (int_arm64_neon_umull node:$MHS, node:$RHS))>>;
3900 defm UMLSL : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
3901 TriOpFrag<(sub node:$LHS, (int_arm64_neon_umull node:$MHS, node:$RHS))>>;
3902 defm UMULL : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
3903 int_arm64_neon_umull>;
3905 // A scalar sqdmull with the second operand being a vector lane can be
3906 // handled directly with the indexed instruction encoding.
3907 def : Pat<(int_arm64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
3908 (vector_extract (v4i32 V128:$Vm),
3909 VectorIndexS:$idx)),
3910 (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
3912 //----------------------------------------------------------------------------
3913 // AdvSIMD scalar shift instructions
3914 //----------------------------------------------------------------------------
3915 defm FCVTZS : SIMDScalarRShiftSD<0, 0b11111, "fcvtzs">;
3916 defm FCVTZU : SIMDScalarRShiftSD<1, 0b11111, "fcvtzu">;
3917 defm SCVTF : SIMDScalarRShiftSD<0, 0b11100, "scvtf">;
3918 defm UCVTF : SIMDScalarRShiftSD<1, 0b11100, "ucvtf">;
3919 // Codegen patterns for the above. We don't put these directly on the
3920 // instructions because TableGen's type inference can't handle the truth.
3921 // Having the same base pattern for fp <--> int totally freaks it out.
3922 def : Pat<(int_arm64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
3923 (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
3924 def : Pat<(int_arm64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
3925 (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
3926 def : Pat<(i64 (int_arm64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
3927 (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
3928 def : Pat<(i64 (int_arm64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
3929 (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
3930 def : Pat<(v1i64 (int_arm64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
3932 (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
3933 def : Pat<(v1i64 (int_arm64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
3935 (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
3936 def : Pat<(int_arm64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
3937 (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
3938 def : Pat<(int_arm64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
3939 (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
3940 def : Pat<(f64 (int_arm64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
3941 (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
3942 def : Pat<(f64 (int_arm64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
3943 (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
3944 def : Pat<(v1f64 (int_arm64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
3946 (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
3947 def : Pat<(v1f64 (int_arm64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
3949 (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
3951 defm SHL : SIMDScalarLShiftD< 0, 0b01010, "shl", ARM64vshl>;
3952 defm SLI : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
3953 defm SQRSHRN : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
3954 int_arm64_neon_sqrshrn>;
3955 defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
3956 int_arm64_neon_sqrshrun>;
3957 defm SQSHLU : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", ARM64sqshlui>;
3958 defm SQSHL : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", ARM64sqshli>;
3959 defm SQSHRN : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
3960 int_arm64_neon_sqshrn>;
3961 defm SQSHRUN : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
3962 int_arm64_neon_sqshrun>;
3963 defm SRI : SIMDScalarRShiftDTied< 1, 0b01000, "sri">;
3964 defm SRSHR : SIMDScalarRShiftD< 0, 0b00100, "srshr", ARM64srshri>;
3965 defm SRSRA : SIMDScalarRShiftDTied< 0, 0b00110, "srsra",
3966 TriOpFrag<(add node:$LHS,
3967 (ARM64srshri node:$MHS, node:$RHS))>>;
3968 defm SSHR : SIMDScalarRShiftD< 0, 0b00000, "sshr", ARM64vashr>;
3969 defm SSRA : SIMDScalarRShiftDTied< 0, 0b00010, "ssra",
3970 TriOpFrag<(add node:$LHS,
3971 (ARM64vashr node:$MHS, node:$RHS))>>;
3972 defm UQRSHRN : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
3973 int_arm64_neon_uqrshrn>;
3974 defm UQSHL : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", ARM64uqshli>;
3975 defm UQSHRN : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
3976 int_arm64_neon_uqshrn>;
3977 defm URSHR : SIMDScalarRShiftD< 1, 0b00100, "urshr", ARM64urshri>;
3978 defm URSRA : SIMDScalarRShiftDTied< 1, 0b00110, "ursra",
3979 TriOpFrag<(add node:$LHS,
3980 (ARM64urshri node:$MHS, node:$RHS))>>;
3981 defm USHR : SIMDScalarRShiftD< 1, 0b00000, "ushr", ARM64vlshr>;
3982 defm USRA : SIMDScalarRShiftDTied< 1, 0b00010, "usra",
3983 TriOpFrag<(add node:$LHS,
3984 (ARM64vlshr node:$MHS, node:$RHS))>>;
3986 //----------------------------------------------------------------------------
3987 // AdvSIMD vector shift instructions
3988 //----------------------------------------------------------------------------
3989 defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_arm64_neon_vcvtfp2fxs>;
3990 defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_arm64_neon_vcvtfp2fxu>;
3991 defm SCVTF: SIMDVectorRShiftSDToFP<0, 0b11100, "scvtf",
3992 int_arm64_neon_vcvtfxs2fp>;
3993 defm RSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
3994 int_arm64_neon_rshrn>;
3995 defm SHL : SIMDVectorLShiftBHSD<0, 0b01010, "shl", ARM64vshl>;
3996 defm SHRN : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
3997 BinOpFrag<(trunc (ARM64vashr node:$LHS, node:$RHS))>>;
3998 defm SLI : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_arm64_neon_vsli>;
3999 def : Pat<(v1i64 (int_arm64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
4000 (i32 vecshiftL64:$imm))),
4001 (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
4002 defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
4003 int_arm64_neon_sqrshrn>;
4004 defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
4005 int_arm64_neon_sqrshrun>;
4006 defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", ARM64sqshlui>;
4007 defm SQSHL : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", ARM64sqshli>;
4008 defm SQSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
4009 int_arm64_neon_sqshrn>;
4010 defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
4011 int_arm64_neon_sqshrun>;
4012 defm SRI : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_arm64_neon_vsri>;
4013 def : Pat<(v1i64 (int_arm64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
4014 (i32 vecshiftR64:$imm))),
4015 (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
4016 defm SRSHR : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", ARM64srshri>;
4017 defm SRSRA : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
4018 TriOpFrag<(add node:$LHS,
4019 (ARM64srshri node:$MHS, node:$RHS))> >;
4020 defm SSHLL : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
4021 BinOpFrag<(ARM64vshl (sext node:$LHS), node:$RHS)>>;
4023 defm SSHR : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", ARM64vashr>;
4024 defm SSRA : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
4025 TriOpFrag<(add node:$LHS, (ARM64vashr node:$MHS, node:$RHS))>>;
4026 defm UCVTF : SIMDVectorRShiftSDToFP<1, 0b11100, "ucvtf",
4027 int_arm64_neon_vcvtfxu2fp>;
4028 defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
4029 int_arm64_neon_uqrshrn>;
4030 defm UQSHL : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", ARM64uqshli>;
4031 defm UQSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
4032 int_arm64_neon_uqshrn>;
4033 defm URSHR : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", ARM64urshri>;
4034 defm URSRA : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
4035 TriOpFrag<(add node:$LHS,
4036 (ARM64urshri node:$MHS, node:$RHS))> >;
4037 defm USHLL : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
4038 BinOpFrag<(ARM64vshl (zext node:$LHS), node:$RHS)>>;
4039 defm USHR : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", ARM64vlshr>;
4040 defm USRA : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
4041 TriOpFrag<(add node:$LHS, (ARM64vlshr node:$MHS, node:$RHS))> >;
// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result, so it
// does not matter which form of shift is used).
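// For example, for a v8i16 input shifted right by #8, the bits shifted in at
// the top (zeros for vlshr, sign copies for vashr) all land in the upper byte
// of each lane and are dropped by the truncate, so either form can select to
// something like "shrn v0.8b, v0.8h, #8".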
4046 def : Pat<(v8i8 (trunc (ARM64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
4047 (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
4048 def : Pat<(v4i16 (trunc (ARM64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
4049 (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
4050 def : Pat<(v2i32 (trunc (ARM64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
4051 (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
4053 def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
4054 (trunc (ARM64vlshr (v8i16 V128:$Rn),
4055 vecshiftR16Narrow:$imm)))),
4056 (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
4057 V128:$Rn, vecshiftR16Narrow:$imm)>;
4058 def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
4059 (trunc (ARM64vlshr (v4i32 V128:$Rn),
4060 vecshiftR32Narrow:$imm)))),
4061 (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
4062 V128:$Rn, vecshiftR32Narrow:$imm)>;
4063 def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
4064 (trunc (ARM64vlshr (v2i64 V128:$Rn),
4065 vecshiftR64Narrow:$imm)))),
4066 (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                             V128:$Rn, vecshiftR64Narrow:$imm)>;
// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
4071 def : Pat<(v8i16 (sext (v8i8 V64:$Rn))), (SSHLLv8i8_shift V64:$Rn, (i32 0))>;
4072 def : Pat<(v8i16 (zext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
4073 def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
4074 def : Pat<(v4i32 (sext (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
4075 def : Pat<(v4i32 (zext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
4076 def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
4077 def : Pat<(v2i64 (sext (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
4078 def : Pat<(v2i64 (zext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
4079 def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128-bit source register.
4081 def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4082 (USHLLv16i8_shift V128:$Rn, (i32 0))>;
4083 def : Pat<(v8i16 (zext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4084 (USHLLv16i8_shift V128:$Rn, (i32 0))>;
4085 def : Pat<(v8i16 (sext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4086 (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
4087 def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4088 (USHLLv8i16_shift V128:$Rn, (i32 0))>;
4089 def : Pat<(v4i32 (zext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4090 (USHLLv8i16_shift V128:$Rn, (i32 0))>;
4091 def : Pat<(v4i32 (sext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4092 (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
4093 def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4094 (USHLLv4i32_shift V128:$Rn, (i32 0))>;
4095 def : Pat<(v2i64 (zext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4096 (USHLLv4i32_shift V128:$Rn, (i32 0))>;
4097 def : Pat<(v2i64 (sext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4098 (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
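// As a sketch of what the patterns above select to: (v8i16 (sext (v8i8 Vn)))
// becomes "sshll v0.8h, v0.8b, #0", and an extend of the upper half such as
// (v8i16 (zext (extract_subvector Vn, 8))) becomes the "2" form
// "ushll2 v0.8h, v0.16b, #0".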
4100 // Vector shift sxtl aliases
4101 def : InstAlias<"sxtl.8h $dst, $src1",
4102 (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4103 def : InstAlias<"sxtl $dst.8h, $src1.8b",
4104 (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4105 def : InstAlias<"sxtl.4s $dst, $src1",
4106 (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4107 def : InstAlias<"sxtl $dst.4s, $src1.4h",
4108 (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4109 def : InstAlias<"sxtl.2d $dst, $src1",
4110 (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4111 def : InstAlias<"sxtl $dst.2d, $src1.2s",
4112 (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4114 // Vector shift sxtl2 aliases
4115 def : InstAlias<"sxtl2.8h $dst, $src1",
4116 (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4117 def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
4118 (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4119 def : InstAlias<"sxtl2.4s $dst, $src1",
4120 (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4121 def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
4122 (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4123 def : InstAlias<"sxtl2.2d $dst, $src1",
4124 (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4125 def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
4126 (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4128 // Vector shift uxtl aliases
4129 def : InstAlias<"uxtl.8h $dst, $src1",
4130 (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4131 def : InstAlias<"uxtl $dst.8h, $src1.8b",
4132 (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4133 def : InstAlias<"uxtl.4s $dst, $src1",
4134 (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4135 def : InstAlias<"uxtl $dst.4s, $src1.4h",
4136 (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4137 def : InstAlias<"uxtl.2d $dst, $src1",
4138 (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4139 def : InstAlias<"uxtl $dst.2d, $src1.2s",
4140 (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4142 // Vector shift uxtl2 aliases
4143 def : InstAlias<"uxtl2.8h $dst, $src1",
4144 (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4145 def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
4146 (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4147 def : InstAlias<"uxtl2.4s $dst, $src1",
4148 (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4149 def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
4150 (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4151 def : InstAlias<"uxtl2.2d $dst, $src1",
4152 (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4153 def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
4154 (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4156 // If an integer is about to be converted to a floating point value,
4157 // just load it on the floating point unit.
4158 // These patterns are more complex because floating point loads do not
4159 // support sign extension.
4160 // The sign extension has to be explicitly added and is only supported for
4161 // one step: byte-to-half, half-to-word, word-to-doubleword.
4162 // SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
4167 // However, this is not good for code size.
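// As a rough sketch of the 8-bit case below (register assignment is
// illustrative only), instead of the GPR route
//   ldrsb w8, [x0, x1]
//   scvtf s0, w8
// the patterns produce an all-FPR sequence along the lines of
//   ldr   b0, [x0, x1]
//   sshll v0.8h, v0.8b, #0
//   sshll v0.4s, v0.4h, #0
//   scvtf s0, s0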
4168 // 8-bits -> float. 2 sizes step-up.
4169 def : Pat <(f32 (sint_to_fp (i32 (sextloadi8 ro_indexed8:$addr)))),
4170 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4175 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4176 (LDRBro ro_indexed8:$addr),
4181 ssub)))>, Requires<[NotForCodeSize]>;
4182 def : Pat <(f32 (sint_to_fp (i32 (sextloadi8 am_indexed8:$addr)))),
4183 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4188 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4189 (LDRBui am_indexed8:$addr),
4194 ssub)))>, Requires<[NotForCodeSize]>;
4195 def : Pat <(f32 (sint_to_fp (i32 (sextloadi8 am_unscaled8:$addr)))),
4196 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4201 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4202 (LDURBi am_unscaled8:$addr),
4207 ssub)))>, Requires<[NotForCodeSize]>;
4208 // 16-bits -> float. 1 size step-up.
4209 def : Pat <(f32 (sint_to_fp (i32 (sextloadi16 ro_indexed16:$addr)))),
4210 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4212 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4213 (LDRHro ro_indexed16:$addr),
4216 ssub)))>, Requires<[NotForCodeSize]>;
4217 def : Pat <(f32 (sint_to_fp (i32 (sextloadi16 am_indexed16:$addr)))),
4218 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4220 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4221 (LDRHui am_indexed16:$addr),
4224 ssub)))>, Requires<[NotForCodeSize]>;
4225 def : Pat <(f32 (sint_to_fp (i32 (sextloadi16 am_unscaled16:$addr)))),
4226 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4228 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4229 (LDURHi am_unscaled16:$addr),
4232 ssub)))>, Requires<[NotForCodeSize]>;
// 32-bit to 32-bit conversions are handled in the target-specific dag combine
// performIntToFpCombine.
// 64-bit integer to 32-bit floating point is not possible with SCVTF on
// floating point registers (both source and destination must have the
// same size).
// Here are the patterns for 8, 16, 32, and 64-bit to double.
// 8-bit -> double: 3 size step-ups, so give up.
// 16-bit -> double: 2 size step-ups.
4242 def : Pat <(f64 (sint_to_fp (i32 (sextloadi16 ro_indexed16:$addr)))),
4243 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
4248 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4249 (LDRHro ro_indexed16:$addr),
4254 dsub)))>, Requires<[NotForCodeSize]>;
4255 def : Pat <(f64 (sint_to_fp (i32 (sextloadi16 am_indexed16:$addr)))),
4256 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
4261 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4262 (LDRHui am_indexed16:$addr),
4267 dsub)))>, Requires<[NotForCodeSize]>;
4268 def : Pat <(f64 (sint_to_fp (i32 (sextloadi16 am_unscaled16:$addr)))),
4269 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
4274 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4275 (LDURHi am_unscaled16:$addr),
4280 dsub)))>, Requires<[NotForCodeSize]>;
4281 // 32-bits -> double. 1 size step-up.
4282 def : Pat <(f64 (sint_to_fp (i32 (load ro_indexed32:$addr)))),
4283 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
4285 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4286 (LDRSro ro_indexed32:$addr),
4289 dsub)))>, Requires<[NotForCodeSize]>;
4290 def : Pat <(f64 (sint_to_fp (i32 (load am_indexed32:$addr)))),
4291 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
4293 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4294 (LDRSui am_indexed32:$addr),
4297 dsub)))>, Requires<[NotForCodeSize]>;
4298 def : Pat <(f64 (sint_to_fp (i32 (load am_unscaled32:$addr)))),
4299 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
4301 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4302 (LDURSi am_unscaled32:$addr),
4305 dsub)))>, Requires<[NotForCodeSize]>;
4306 // 64-bits -> double are handled in target specific dag combine:
4307 // performIntToFpCombine.
4310 //----------------------------------------------------------------------------
4311 // AdvSIMD Load-Store Structure
4312 //----------------------------------------------------------------------------
4313 defm LD1 : SIMDLd1Multiple<"ld1">;
4314 defm LD2 : SIMDLd2Multiple<"ld2">;
4315 defm LD3 : SIMDLd3Multiple<"ld3">;
4316 defm LD4 : SIMDLd4Multiple<"ld4">;
4318 defm ST1 : SIMDSt1Multiple<"st1">;
4319 defm ST2 : SIMDSt2Multiple<"st2">;
4320 defm ST3 : SIMDSt3Multiple<"st3">;
4321 defm ST4 : SIMDSt4Multiple<"st4">;
4323 class Ld1Pat<ValueType ty, Instruction INST>
4324 : Pat<(ty (load am_simdnoindex:$vaddr)), (INST am_simdnoindex:$vaddr)>;
4326 def : Ld1Pat<v16i8, LD1Onev16b>;
4327 def : Ld1Pat<v8i16, LD1Onev8h>;
4328 def : Ld1Pat<v4i32, LD1Onev4s>;
4329 def : Ld1Pat<v2i64, LD1Onev2d>;
4330 def : Ld1Pat<v8i8, LD1Onev8b>;
4331 def : Ld1Pat<v4i16, LD1Onev4h>;
4332 def : Ld1Pat<v2i32, LD1Onev2s>;
4333 def : Ld1Pat<v1i64, LD1Onev1d>;
4335 class St1Pat<ValueType ty, Instruction INST>
4336 : Pat<(store ty:$Vt, am_simdnoindex:$vaddr),
4337 (INST ty:$Vt, am_simdnoindex:$vaddr)>;
4339 def : St1Pat<v16i8, ST1Onev16b>;
4340 def : St1Pat<v8i16, ST1Onev8h>;
4341 def : St1Pat<v4i32, ST1Onev4s>;
4342 def : St1Pat<v2i64, ST1Onev2d>;
4343 def : St1Pat<v8i8, ST1Onev8b>;
4344 def : St1Pat<v4i16, ST1Onev4h>;
4345 def : St1Pat<v2i32, ST1Onev2s>;
4346 def : St1Pat<v1i64, ST1Onev1d>;
4352 defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
4353 defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
4354 defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
4355 defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
4356 let mayLoad = 1, neverHasSideEffects = 1 in {
4357 defm LD1 : SIMDLdSingleBTied<0, 0b000, "ld1", VecListOneb, GPR64pi1>;
4358 defm LD1 : SIMDLdSingleHTied<0, 0b010, 0, "ld1", VecListOneh, GPR64pi2>;
4359 defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes, GPR64pi4>;
4360 defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned, GPR64pi8>;
4361 defm LD2 : SIMDLdSingleBTied<1, 0b000, "ld2", VecListTwob, GPR64pi2>;
4362 defm LD2 : SIMDLdSingleHTied<1, 0b010, 0, "ld2", VecListTwoh, GPR64pi4>;
4363 defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos, GPR64pi8>;
4364 defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod, GPR64pi16>;
4365 defm LD3 : SIMDLdSingleBTied<0, 0b001, "ld3", VecListThreeb, GPR64pi3>;
4366 defm LD3 : SIMDLdSingleHTied<0, 0b011, 0, "ld3", VecListThreeh, GPR64pi6>;
4367 defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
4368 defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
4369 defm LD4 : SIMDLdSingleBTied<1, 0b001, "ld4", VecListFourb, GPR64pi4>;
4370 defm LD4 : SIMDLdSingleHTied<1, 0b011, 0, "ld4", VecListFourh, GPR64pi8>;
4371 defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours, GPR64pi16>;
4372 defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd, GPR64pi32>;
4375 def : Pat<(v8i8 (ARM64dup (i32 (extloadi8 am_simdnoindex:$vaddr)))),
4376 (LD1Rv8b am_simdnoindex:$vaddr)>;
4377 def : Pat<(v16i8 (ARM64dup (i32 (extloadi8 am_simdnoindex:$vaddr)))),
4378 (LD1Rv16b am_simdnoindex:$vaddr)>;
4379 def : Pat<(v4i16 (ARM64dup (i32 (extloadi16 am_simdnoindex:$vaddr)))),
4380 (LD1Rv4h am_simdnoindex:$vaddr)>;
4381 def : Pat<(v8i16 (ARM64dup (i32 (extloadi16 am_simdnoindex:$vaddr)))),
4382 (LD1Rv8h am_simdnoindex:$vaddr)>;
4383 def : Pat<(v2i32 (ARM64dup (i32 (load am_simdnoindex:$vaddr)))),
4384 (LD1Rv2s am_simdnoindex:$vaddr)>;
4385 def : Pat<(v4i32 (ARM64dup (i32 (load am_simdnoindex:$vaddr)))),
4386 (LD1Rv4s am_simdnoindex:$vaddr)>;
4387 def : Pat<(v2i64 (ARM64dup (i64 (load am_simdnoindex:$vaddr)))),
4388 (LD1Rv2d am_simdnoindex:$vaddr)>;
4389 def : Pat<(v1i64 (ARM64dup (i64 (load am_simdnoindex:$vaddr)))),
4390 (LD1Rv1d am_simdnoindex:$vaddr)>;
4391 // Grab the floating point version too
4392 def : Pat<(v2f32 (ARM64dup (f32 (load am_simdnoindex:$vaddr)))),
4393 (LD1Rv2s am_simdnoindex:$vaddr)>;
4394 def : Pat<(v4f32 (ARM64dup (f32 (load am_simdnoindex:$vaddr)))),
4395 (LD1Rv4s am_simdnoindex:$vaddr)>;
4396 def : Pat<(v2f64 (ARM64dup (f64 (load am_simdnoindex:$vaddr)))),
4397 (LD1Rv2d am_simdnoindex:$vaddr)>;
4398 def : Pat<(v1f64 (ARM64dup (f64 (load am_simdnoindex:$vaddr)))),
4399 (LD1Rv1d am_simdnoindex:$vaddr)>;
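// LD1R loads a single element and replicates it into every lane, which is
// exactly the dup-of-a-load DAGs matched above.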
4401 class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
4402 ValueType VTy, ValueType STy, Instruction LD1>
4403 : Pat<(vector_insert (VTy VecListOne128:$Rd),
4404 (STy (scalar_load am_simdnoindex:$vaddr)), VecIndex:$idx),
4405 (LD1 VecListOne128:$Rd, VecIndex:$idx, am_simdnoindex:$vaddr)>;
4407 def : Ld1Lane128Pat<extloadi8, VectorIndexB, v16i8, i32, LD1i8>;
4408 def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
4409 def : Ld1Lane128Pat<load, VectorIndexS, v4i32, i32, LD1i32>;
4410 def : Ld1Lane128Pat<load, VectorIndexS, v4f32, f32, LD1i32>;
4411 def : Ld1Lane128Pat<load, VectorIndexD, v2i64, i64, LD1i64>;
4412 def : Ld1Lane128Pat<load, VectorIndexD, v2f64, f64, LD1i64>;
4414 class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
4415 ValueType VTy, ValueType STy, Instruction LD1>
4416 : Pat<(vector_insert (VTy VecListOne64:$Rd),
4417 (STy (scalar_load am_simdnoindex:$vaddr)), VecIndex:$idx),
      (EXTRACT_SUBREG
          (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
               VecIndex:$idx, am_simdnoindex:$vaddr),
          dsub)>;
4423 def : Ld1Lane64Pat<extloadi8, VectorIndexB, v8i8, i32, LD1i8>;
4424 def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
4425 def : Ld1Lane64Pat<load, VectorIndexS, v2i32, i32, LD1i32>;
4426 def : Ld1Lane64Pat<load, VectorIndexS, v2f32, f32, LD1i32>;
4429 defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
4430 defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
4431 defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
4432 defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
4435 defm ST1 : SIMDStSingleB<0, 0b000, "st1", VecListOneb, GPR64pi1>;
4436 defm ST1 : SIMDStSingleH<0, 0b010, 0, "st1", VecListOneh, GPR64pi2>;
4437 defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
4438 defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
4440 let AddedComplexity = 8 in
4441 class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
4442 ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
          (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
4445 am_simdnoindex:$vaddr),
4446 (ST1 VecListOne128:$Vt, VecIndex:$idx, am_simdnoindex:$vaddr)>;
4448 def : St1Lane128Pat<truncstorei8, VectorIndexB, v16i8, i32, ST1i8>;
4449 def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
4450 def : St1Lane128Pat<store, VectorIndexS, v4i32, i32, ST1i32>;
4451 def : St1Lane128Pat<store, VectorIndexS, v4f32, f32, ST1i32>;
4452 def : St1Lane128Pat<store, VectorIndexD, v2i64, i64, ST1i64>;
4453 def : St1Lane128Pat<store, VectorIndexD, v2f64, f64, ST1i64>;
4455 let AddedComplexity = 8 in
4456 class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
4457 ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
          (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
4460 am_simdnoindex:$vaddr),
4461 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
4462 VecIndex:$idx, am_simdnoindex:$vaddr)>;
4464 def : St1Lane64Pat<truncstorei8, VectorIndexB, v8i8, i32, ST1i8>;
4465 def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
4466 def : St1Lane64Pat<store, VectorIndexS, v2i32, i32, ST1i32>;
4467 def : St1Lane64Pat<store, VectorIndexS, v2f32, f32, ST1i32>;
4469 multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
4472 def : Pat<(scalar_store
4473 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
4474 am_simdnoindex:$vaddr, offset),
4475 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
4476 VecIndex:$idx, am_simdnoindex:$vaddr, XZR)>;
4478 def : Pat<(scalar_store
4479 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
4480 am_simdnoindex:$vaddr, GPR64:$Rm),
4481 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
4482 VecIndex:$idx, am_simdnoindex:$vaddr, $Rm)>;
4485 defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
4486 defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
4488 defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
4489 defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
4490 defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
4491 defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
4493 multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                              ValueType VTy, ValueType STy, Instruction ST1,
                              int offset> {
4496 def : Pat<(scalar_store
4497 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
4498 am_simdnoindex:$vaddr, offset),
4499 (ST1 VecListOne128:$Vt, VecIndex:$idx, am_simdnoindex:$vaddr, XZR)>;
4501 def : Pat<(scalar_store
4502 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
4503 am_simdnoindex:$vaddr, GPR64:$Rm),
4504 (ST1 VecListOne128:$Vt, VecIndex:$idx, am_simdnoindex:$vaddr, $Rm)>;
4507 defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
4509 defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
4511 defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
4512 defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
4513 defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
4514 defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
4516 let mayStore = 1, neverHasSideEffects = 1 in {
4517 defm ST2 : SIMDStSingleB<1, 0b000, "st2", VecListTwob, GPR64pi2>;
4518 defm ST2 : SIMDStSingleH<1, 0b010, 0, "st2", VecListTwoh, GPR64pi4>;
4519 defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos, GPR64pi8>;
4520 defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod, GPR64pi16>;
4521 defm ST3 : SIMDStSingleB<0, 0b001, "st3", VecListThreeb, GPR64pi3>;
4522 defm ST3 : SIMDStSingleH<0, 0b011, 0, "st3", VecListThreeh, GPR64pi6>;
4523 defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
4524 defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
4525 defm ST4 : SIMDStSingleB<1, 0b001, "st4", VecListFourb, GPR64pi4>;
4526 defm ST4 : SIMDStSingleH<1, 0b011, 0, "st4", VecListFourh, GPR64pi8>;
4527 defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours, GPR64pi16>;
4528 defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd, GPR64pi32>;
4531 defm ST1 : SIMDLdSt1SingleAliases<"st1">;
4532 defm ST2 : SIMDLdSt2SingleAliases<"st2">;
4533 defm ST3 : SIMDLdSt3SingleAliases<"st3">;
4534 defm ST4 : SIMDLdSt4SingleAliases<"st4">;
4536 //----------------------------------------------------------------------------
4537 // Crypto extensions
4538 //----------------------------------------------------------------------------
4540 def AESErr : AESTiedInst<0b0100, "aese", int_arm64_crypto_aese>;
4541 def AESDrr : AESTiedInst<0b0101, "aesd", int_arm64_crypto_aesd>;
4542 def AESMCrr : AESInst< 0b0110, "aesmc", int_arm64_crypto_aesmc>;
4543 def AESIMCrr : AESInst< 0b0111, "aesimc", int_arm64_crypto_aesimc>;
4545 def SHA1Crrr : SHATiedInstQSV<0b000, "sha1c", int_arm64_crypto_sha1c>;
4546 def SHA1Prrr : SHATiedInstQSV<0b001, "sha1p", int_arm64_crypto_sha1p>;
4547 def SHA1Mrrr : SHATiedInstQSV<0b010, "sha1m", int_arm64_crypto_sha1m>;
4548 def SHA1SU0rrr : SHATiedInstVVV<0b011, "sha1su0", int_arm64_crypto_sha1su0>;
4549 def SHA256Hrrr : SHATiedInstQQV<0b100, "sha256h", int_arm64_crypto_sha256h>;
4550 def SHA256H2rrr : SHATiedInstQQV<0b101, "sha256h2",int_arm64_crypto_sha256h2>;
4551 def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_arm64_crypto_sha256su1>;
4553 def SHA1Hrr : SHAInstSS< 0b0000, "sha1h", int_arm64_crypto_sha1h>;
4554 def SHA1SU1rr : SHATiedInstVV<0b0001, "sha1su1", int_arm64_crypto_sha1su1>;
4555 def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_arm64_crypto_sha256su0>;
4557 //----------------------------------------------------------------------------
4559 //----------------------------------------------------------------------------
4560 // FIXME: Like for X86, these should go in their own separate .td file.
// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
4566 // FIXME: X86 also checks for CMOV here. Do we need something similar?
4567 def def32 : PatLeaf<(i32 GPR32:$src), [{
4568 return N->getOpcode() != ISD::TRUNCATE &&
4569 N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg;
}]>;
4573 // In the case of a 32-bit def that is known to implicitly zero-extend,
4574 // we can use a SUBREG_TO_REG.
4575 def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
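// For example (a sketch): in "i64 = zext (add i32 %a, %b)" the 32-bit ADD
// already clears bits [63:32] of the destination register, so the zext is a
// free SUBREG_TO_REG; only values coming from a truncate or CopyFromReg fall
// back to the explicit UBFM pattern below.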
4577 // For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
4579 def : Pat<(i64 (anyext GPR32:$src)),
4580 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
4582 // When we need to explicitly zero-extend, we use an unsigned bitfield move
4583 // instruction (UBFM) on the enclosing super-reg.
4584 def : Pat<(i64 (zext GPR32:$src)),
4585 (UBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
4587 // To sign extend, we use a signed bitfield move instruction (SBFM) on the
4588 // containing super-reg.
4589 def : Pat<(i64 (sext GPR32:$src)),
4590 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
4591 def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
4592 def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
4593 def : Pat<(i64 (sext_inreg GPR64:$src, i8)), (SBFMXri GPR64:$src, 0, 7)>;
4594 def : Pat<(i64 (sext_inreg GPR64:$src, i1)), (SBFMXri GPR64:$src, 0, 0)>;
4595 def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
4596 def : Pat<(i32 (sext_inreg GPR32:$src, i8)), (SBFMWri GPR32:$src, 0, 7)>;
4597 def : Pat<(i32 (sext_inreg GPR32:$src, i1)), (SBFMWri GPR32:$src, 0, 0)>;
4599 def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
4600 (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
4601 (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
4602 def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
4603 (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
4604 (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
4606 def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
4607 (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
4608 (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
4609 def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
4610 (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
4611 (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
4613 def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
4614 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
4615 (i64 (i64shift_a imm0_63:$imm)),
4616 (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
4618 // sra patterns have an AddedComplexity of 10, so make sure we have a higher
4619 // AddedComplexity for the following patterns since we want to match sext + sra
4620 // patterns before we attempt to match a single sra node.
4621 let AddedComplexity = 20 in {
4622 // We support all sext + sra combinations which preserve at least one bit of the
// original value which is to be sign extended. E.g. we support shifts up to
// the original type width minus one bit (7 for i8, 15 for i16, 31 for i32).
4625 def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
4626 (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
4627 def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
4628 (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
4630 def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
4631 (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
4632 def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
4633 (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
4635 def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
4636 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
4637 (i64 imm0_31:$imm), 31)>;
4638 } // AddedComplexity = 20
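// For instance (a sketch), (sra (sext_inreg w0, i8), #3) keeps sign bits [7:3]
// of the original byte and is matched above as a single SBFM (i.e. something
// like "sbfx w0, w0, #3, #5") rather than a separate sign-extend plus asr.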
4640 // To truncate, we can simply extract from a subregister.
4641 def : Pat<(i32 (trunc GPR64sp:$src)),
4642 (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
4644 // __builtin_trap() uses the BRK instruction on ARM64.
4645 def : Pat<(trap), (BRK 1)>;
4647 // Conversions within AdvSIMD types in the same register size are free.
4648 // But because we need a consistent lane ordering, in big endian many
4649 // conversions require one or more REV instructions.
4651 // Consider a simple memory load followed by a bitconvert then a store.
//   v0 = LOAD v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//   STORE v4i16 v1
4656 // In big endian mode every memory access has an implicit byte swap. LDR and
4657 // STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
4658 // is, they treat the vector as a sequence of elements to be byte-swapped.
4659 // The two pairs of instructions are fundamentally incompatible. We've decided
4660 // to use LD1/ST1 only to simplify compiler implementation.
4662 // LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
// the original code sequence:
//   v0 = LOAD v2i32
//   v1 = REV v2i32 (implicit)
//   v2 = BITCAST v2i32 v1 to v4i16
//   v3 = REV v4i16 v2 (implicit)
//   STORE v4i16 v3
// But this is now broken - the value stored is different from the value loaded
// due to lane reordering. To fix this, on every BITCAST we must perform two
// other REVs:
//   v0 = LOAD v2i32
//   v1 = REV v2i32 (implicit)
//   v2 = REV v2i32 v1
//   v3 = BITCAST v2i32 v2 to v4i16
//   v4 = REV v4i16 v3
//   v5 = REV v4i16 v4 (implicit)
//   STORE v4i16 v5
4681 // This means an extra two instructions, but actually in most cases the two REV
4682 // instructions can be combined into one. For example:
4683 // (REV64_2s (REV64_4h X)) === (REV32_4h X)
// There is also no 128-bit REV instruction. This must be synthesized with an
// EXT instruction.
4688 // Most bitconverts require some sort of conversion. The only exceptions are:
4689 // a) Identity conversions - vNfX <-> vNiX
4690 // b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
4693 let Predicates = [IsLE] in {
4694 def : Pat<(v8i8 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4695 def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4696 def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4697 def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4699 def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
4700 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
4701 def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
4702 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
4703 def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
4704 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
4705 def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
4706 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
4707 def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
4708 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
4710 let Predicates = [IsBE] in {
4711 def : Pat<(v8i8 (bitconvert GPR64:$Xn)),
4712 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
4713 def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
4714 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
4715 def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
4716 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
4717 def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
4718 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
4720 def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
4721 (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
4722 def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
4723 (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
4724 def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
4725 (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
4726 def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
4727 (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
4729 def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4730 def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4731 def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
4732 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
4733 def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
4734 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4735 def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
4736 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4737 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
4739 def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
4740 (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
4741 def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
4742 (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
4743 def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
4744 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
4745 def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
4746 (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
4747 def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
4748 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
4750 let Predicates = [IsLE] in {
4751 def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
4752 def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
4753 def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
4754 def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
4756 let Predicates = [IsBE] in {
4757 def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
4758 (v1i64 (REV64v2i32 FPR64:$src))>;
4759 def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
4760 (v1i64 (REV64v4i16 FPR64:$src))>;
4761 def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
4762 (v1i64 (REV64v8i8 FPR64:$src))>;
4763 def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
4764 (v1i64 (REV64v2i32 FPR64:$src))>;
4766 def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
4767 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
4769 let Predicates = [IsLE] in {
4770 def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
4771 def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
4772 def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
4773 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
4774 def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
4776 let Predicates = [IsBE] in {
4777 def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
4778 (v2i32 (REV64v2i32 FPR64:$src))>;
4779 def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
4780 (v2i32 (REV32v4i16 FPR64:$src))>;
4781 def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
4782 (v2i32 (REV32v8i8 FPR64:$src))>;
4783 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
4784 (v2i32 (REV64v2i32 FPR64:$src))>;
4785 def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
4786 (v2i32 (REV64v2i32 FPR64:$src))>;
4788 def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
4790 let Predicates = [IsLE] in {
4791 def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
4792 def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
4793 def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
4794 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
4795 def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
4796 def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
4798 let Predicates = [IsBE] in {
4799 def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
4800 (v4i16 (REV64v4i16 FPR64:$src))>;
4801 def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
4802 (v4i16 (REV32v4i16 FPR64:$src))>;
4803 def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
4804 (v4i16 (REV16v8i8 FPR64:$src))>;
4805 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
4806 (v4i16 (REV64v4i16 FPR64:$src))>;
4807 def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
4808 (v4i16 (REV32v4i16 FPR64:$src))>;
4809 def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
4810 (v4i16 (REV64v4i16 FPR64:$src))>;
4813 let Predicates = [IsLE] in {
4814 def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
4815 def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
4816 def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
4817 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
4818 def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
4819 def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
4821 let Predicates = [IsBE] in {
4822 def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
4823 (v8i8 (REV64v8i8 FPR64:$src))>;
4824 def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
4825 (v8i8 (REV32v8i8 FPR64:$src))>;
4826 def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
4827 (v8i8 (REV16v8i8 FPR64:$src))>;
4828 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
4829 (v8i8 (REV64v8i8 FPR64:$src))>;
4830 def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
4831 (v8i8 (REV32v8i8 FPR64:$src))>;
4832 def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
4833 (v8i8 (REV64v8i8 FPR64:$src))>;
4836 let Predicates = [IsLE] in {
4837 def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
4838 def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
4839 def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
4840 def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))), (f64 FPR64:$src)>;
4842 let Predicates = [IsBE] in {
4843 def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
4844 (f64 (REV64v2i32 FPR64:$src))>;
4845 def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
4846 (f64 (REV64v4i16 FPR64:$src))>;
4847 def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
4848 (f64 (REV64v2i32 FPR64:$src))>;
4849 def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
4850 (f64 (REV64v8i8 FPR64:$src))>;
4852 def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
4853 def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
          (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
          (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
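// On big-endian, an f128 additionally has its two 64-bit halves in the
// opposite order from the 128-bit vector types, so the patterns below swap
// the halves with an EXT #8, combined with a REV where the element size
// also changes.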
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                          (REV64v16i8 FPR128:$src), (i32 8)))>;
}
let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
          (v2f64 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
          (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
          (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
          (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
          (v2i64 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
          (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
          (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
          (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
          (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
          (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
}
let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
          (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                           (REV64v16i8 FPR128:$src), (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
}
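// Extracting the high 64-bit half of a 128-bit vector: broadcast the high
// doubleword (lane 1 of the v2i64 view) with DUP and then read the D
// subregister of the result.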
def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
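// The INSERT_SUBREG below places the 64-bit source in the D subregister of
// an IMPLICIT_DEF 128-bit register, leaving the upper 64 bits undefined,
// which is what inserting into an undef vector requires.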
def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// and v4f32.
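// ADDPv2i64p and FADDPv2i64p are the scalar forms of the pairwise-add
// instructions: they add the two 64-bit lanes of the source vector and
// leave the sum in a single D register.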
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
          (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
          (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
// Scalar 64-bit shifts in FPR64 registers.
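// These map the i64 forms of the NEON shift intrinsics onto the
// single-element (v1i64) vector shift instructions, so the operands stay in
// the FP/SIMD register file rather than moving through GPRs.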
def : Pat<(i64 (int_arm64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_arm64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_arm64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_arm64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
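// TCRETURNdi carries a direct (symbolic) callee and TCRETURNri an indirect
// callee in a register; both are selected from the ARM64tcret node below and
// become a plain tail-call branch later in code generation.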
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst), []>;
def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst), []>;
}

def : Pat<(ARM64tcret tcGPR64:$dst), (TCRETURNri tcGPR64:$dst)>;
def : Pat<(ARM64tcret (i64 tglobaladdr:$dst)), (TCRETURNdi texternalsym:$dst)>;
def : Pat<(ARM64tcret (i64 texternalsym:$dst)), (TCRETURNdi texternalsym:$dst)>;
include "ARM64InstrAtomics.td"