//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "aarch64-isel"

#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
  assert(TM.getSubtarget<AArch64Subtarget>().isTargetELF() &&
         "unknown subtarget type");
  return new AArch64ElfTargetObjectFile();
}
AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {

  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  // SIMD compares set the entire lane's bits to 1.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
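  // For example, a v4i32 compare therefore yields lanes that are either all
  // zeros or all ones (0x0 or 0xFFFFFFFF per lane).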

  // Scalar register <-> type mapping
  addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    // Vector register <-> type mapping
    addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
  }

  computeRegisterProperties();

  // We combine OR nodes for bitfield and NEON BSL operations.
  setTargetDAGCombine(ISD::OR);

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SHL);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // AArch64 does not have i1 loads, or much of anything for i1 really.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
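  // Promote here means an extending i1 load is legalized roughly as the
  // corresponding i8 load followed by the appropriate mask or sign-extension.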

  setStackPointerRegisterToSaveRestore(AArch64::XSP);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);

  // We'll lower globals to wrappers for selection.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);

  // A64 instructions have the comparison predicate attached to the user of the
  // result, but having a separate comparison is valuable for matching.
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);

  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);

  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);

  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
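  // Expand lowers CTPOP to the generic shift-and-mask bit-counting sequence,
  // since there is no scalar popcount instruction to select here.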

  // Legal floating-point operations.
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f64, Legal);

  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FCEIL, MVT::f64, Legal);

  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

  setOperationAction(ISD::FNEG, MVT::f32, Legal);
  setOperationAction(ISD::FNEG, MVT::f64, Legal);

  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f64, Legal);

  setOperationAction(ISD::FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::FSQRT, MVT::f64, Legal);

  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f128, Legal);

  // Illegal floating-point operations.
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);

  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);

  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f64, Expand);

  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG, MVT::f64, Expand);

  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);

  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);

  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
  setOperationAction(ISD::FPOWI, MVT::f64, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);

  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most
  // cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);

  // i128 shift operation support
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
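  // An i128 shift is type-legalized into two i64 halves; the resulting
  // *_PARTS nodes are then custom-lowered into explicit long-shift sequences.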

  // This prevents LLVM from trying to compress double constants into a float
  // constant-pool entry and then loading from there. It's of doubtful benefit
  // for A64: we'd need LDR followed by FCVT, I believe.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);

  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
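  // Consequently, e.g. storing an f64 value into an f32 slot goes through an
  // explicit FP_ROUND followed by an ordinary f32 store.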

  setExceptionPointerRegister(AArch64::X0);
  setExceptionSelectorRegister(AArch64::X1);

  if (Subtarget->hasNEON()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v1i64, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v16i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Expand);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i8, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);

    setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);

    setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);

    setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v1f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);

    setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);

    setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v1f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v1f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v1i8, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v1i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v1i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v1i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v1i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v1i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v1i8, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v1i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v1i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_UINT, MVT::v1i8, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v1i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v1i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Custom);

    // NEON does not support vector divide/remainder operations except
    // floating-point divide.
    setOperationAction(ISD::SDIV, MVT::v1i8, Expand);
    setOperationAction(ISD::SDIV, MVT::v8i8, Expand);
    setOperationAction(ISD::SDIV, MVT::v16i8, Expand);
    setOperationAction(ISD::SDIV, MVT::v1i16, Expand);
    setOperationAction(ISD::SDIV, MVT::v4i16, Expand);
    setOperationAction(ISD::SDIV, MVT::v8i16, Expand);
    setOperationAction(ISD::SDIV, MVT::v1i32, Expand);
    setOperationAction(ISD::SDIV, MVT::v2i32, Expand);
    setOperationAction(ISD::SDIV, MVT::v4i32, Expand);
    setOperationAction(ISD::SDIV, MVT::v1i64, Expand);
    setOperationAction(ISD::SDIV, MVT::v2i64, Expand);

    setOperationAction(ISD::UDIV, MVT::v1i8, Expand);
    setOperationAction(ISD::UDIV, MVT::v8i8, Expand);
    setOperationAction(ISD::UDIV, MVT::v16i8, Expand);
    setOperationAction(ISD::UDIV, MVT::v1i16, Expand);
    setOperationAction(ISD::UDIV, MVT::v4i16, Expand);
    setOperationAction(ISD::UDIV, MVT::v8i16, Expand);
    setOperationAction(ISD::UDIV, MVT::v1i32, Expand);
    setOperationAction(ISD::UDIV, MVT::v2i32, Expand);
    setOperationAction(ISD::UDIV, MVT::v4i32, Expand);
    setOperationAction(ISD::UDIV, MVT::v1i64, Expand);
    setOperationAction(ISD::UDIV, MVT::v2i64, Expand);

    setOperationAction(ISD::SREM, MVT::v1i8, Expand);
    setOperationAction(ISD::SREM, MVT::v8i8, Expand);
    setOperationAction(ISD::SREM, MVT::v16i8, Expand);
    setOperationAction(ISD::SREM, MVT::v1i16, Expand);
    setOperationAction(ISD::SREM, MVT::v4i16, Expand);
    setOperationAction(ISD::SREM, MVT::v8i16, Expand);
    setOperationAction(ISD::SREM, MVT::v1i32, Expand);
    setOperationAction(ISD::SREM, MVT::v2i32, Expand);
    setOperationAction(ISD::SREM, MVT::v4i32, Expand);
    setOperationAction(ISD::SREM, MVT::v1i64, Expand);
    setOperationAction(ISD::SREM, MVT::v2i64, Expand);

    setOperationAction(ISD::UREM, MVT::v1i8, Expand);
    setOperationAction(ISD::UREM, MVT::v8i8, Expand);
    setOperationAction(ISD::UREM, MVT::v16i8, Expand);
    setOperationAction(ISD::UREM, MVT::v1i16, Expand);
    setOperationAction(ISD::UREM, MVT::v4i16, Expand);
    setOperationAction(ISD::UREM, MVT::v8i16, Expand);
    setOperationAction(ISD::UREM, MVT::v1i32, Expand);
    setOperationAction(ISD::UREM, MVT::v2i32, Expand);
    setOperationAction(ISD::UREM, MVT::v4i32, Expand);
    setOperationAction(ISD::UREM, MVT::v1i64, Expand);
    setOperationAction(ISD::UREM, MVT::v2i64, Expand);

    setOperationAction(ISD::FREM, MVT::v2f32, Expand);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);
    setOperationAction(ISD::FREM, MVT::v1f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);

    setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
    setOperationAction(ISD::SELECT, MVT::v16i8, Expand);
    setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
    setOperationAction(ISD::SELECT, MVT::v8i16, Expand);
    setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
    setOperationAction(ISD::SELECT, MVT::v2i64, Expand);
    setOperationAction(ISD::SELECT, MVT::v2f32, Expand);
    setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::SELECT, MVT::v1f64, Expand);
    setOperationAction(ISD::SELECT, MVT::v2f64, Expand);

    setOperationAction(ISD::SELECT_CC, MVT::v8i8, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v16i8, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v8i16, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v2i32, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v1i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v2i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v2f32, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v1f64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::v2f64, Custom);

    // Vector ExtLoad and TruncStore are expanded.
    for (unsigned I = MVT::FIRST_VECTOR_VALUETYPE;
         I <= MVT::LAST_VECTOR_VALUETYPE; ++I) {
      MVT VT = (MVT::SimpleValueType)I;
      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
      for (unsigned II = MVT::FIRST_VECTOR_VALUETYPE;
           II <= MVT::LAST_VECTOR_VALUETYPE; ++II) {
        MVT VT1 = (MVT::SimpleValueType)II;
        // A TruncStore has two vector types of the same number of elements
        // and different element sizes.
        if (VT.getVectorNumElements() == VT1.getVectorNumElements() &&
            VT.getVectorElementType().getSizeInBits() >
                VT1.getVectorElementType().getSizeInBits())
          setTruncStoreAction(VT, VT1, Expand);
      }
    }
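    // For example, a truncating store of v4i32 as v4i16 keeps the element
    // count while narrowing the elements, so it matches the test above and
    // is expanded rather than selected as a single truncating store.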

    // There is no v1i64/v2i64 multiply; expand v1i64/v2i64 to GPR i64
    // multiplies.
    // FIXME: For a v2i64 multiply, we copy VPR to GPR and do 2 i64 multiplies,
    // then copy back to VPR. This could instead be done with the following 3
    // NEON instructions:
    //     pmull  v2.1q, v0.1d, v1.1d
    //     pmull2 v3.1q, v0.2d, v1.2d
    //     ins    v2.d[1], v3.d[0]
    // Since we can't currently verify the correctness of that assumption, the
    // optimization is left for the future.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);

    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
  }

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::VSELECT);

  MaskAndBranchFoldingIsLegal = true;
}

EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  // It's reasonably important that this value matches the "natural" legal
  // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
  // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
  if (!VT.isVector()) return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}
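
// Example: a scalar f64 setcc therefore produces an i32 result, while a
// v2f64 setcc produces v2i64 (integer lanes of the same count and width).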

static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc, unsigned &StrOpc) {
  static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
                                       AArch64::LDXR_word, AArch64::LDXR_dword};
  static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
                                      AArch64::LDAXR_word, AArch64::LDAXR_dword};
  static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
                                        AArch64::STXR_word, AArch64::STXR_dword};
  static const unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
                                       AArch64::STLXR_word, AArch64::STLXR_dword};

  const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");

  LdrOpc = LoadOps[Log2_32(Size)];
  StrOpc = StoreOps[Log2_32(Size)];
}
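
// Example: Size == 4 with a sequentially-consistent ordering selects the
// acquire/release pair LDAXR_word / STLXR_word (index Log2_32(4) == 2).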

// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really
// have a value type mapped, and they are both defined as MVT::untyped.
// Without knowing the MVT type, MachineLICM::getRegisterClassIDAndCost
// would fail to figure out the register pressure correctly.
std::pair<const TargetRegisterClass*, uint8_t>
AArch64TargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::v4i64:
    RRC = &AArch64::QPairRegClass;
    Cost = 2;
    break;
  case MVT::v8i64:
    RRC = &AArch64::QQuadRegClass;
    Cost = 4;
    break;
  }
  return std::make_pair(RRC, Cost);
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode == 0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *TRC =
      Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   ldxr dest, ptr
  //   <binop> scratch, dest, incr
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // All arithmetic operations we'll be creating are designed to take an extra
  // shift or extend operand, which we can conveniently set to zero.
  if (BinOpcode) {
    // Operand order needs to go the other way for NAND.
    if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
          .addReg(incr).addReg(dest).addImm(0);
    else
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
          .addReg(dest).addReg(incr).addImm(0);
  }

  // From the stxr, the register is GPR32; from the cmp it's GPR32wsp.
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
      .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}
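
// For a word-sized atomic add with acquire-release ordering, the loop built
// above corresponds roughly to (register names illustrative):
//   loop:
//     ldaxr wDest, [xPtr]
//     add   wScratch, wDest, wIncr
//     stlxr wStatus, wScratch, [xPtr]
//     cbnz  wStatus, loop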

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              unsigned CmpOp,
                                              A64CC::CondCodes Cond) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());

  unsigned oldval = dest;
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  unsigned scratch = MRI.createVirtualRegister(TRC);
  MRI.constrainRegClass(scratch, TRCsp);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   ldxr dest, ptr
  //   cmp incr, dest (, sign extend if necessary)
  //   csel scratch, dest, incr, cond
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // Build compare and cmov instructions.
  MRI.constrainRegClass(incr, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
      .addReg(incr).addReg(oldval).addImm(0);

  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
          scratch)
      .addReg(oldval).addReg(incr).addImm(Cond);

  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
      .addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
      .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}
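
// Example: ATOMIC_LOAD_MIN_I8 passes CMPww_sxtb and A64CC::GT, so the loop
// compares incr against the loaded value with a sign-extending byte compare,
// and the csel keeps the loaded value when incr > dest, i.e. the signed
// minimum is stored back.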

MachineBasicBlock *
AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const {
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned oldval = MI->getOperand(2).getReg();
  unsigned newval = MI->getOperand(3).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRCsp;
  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  //  thisMBB:
  //   ...
  //   fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  //  loop1MBB:
  //   ldxr dest, [ptr]
  //   cmp dest, oldval
  //   b.ne exitMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
  MRI.constrainRegClass(dest, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
      .addReg(dest).addReg(oldval).addImm(0);
  BuildMI(BB, dl, TII->get(AArch64::Bcc))
      .addImm(A64CC::NE).addMBB(exitMBB);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  //  loop2MBB:
  //   stxr stxr_status, newval, [ptr]
  //   cbnz stxr_status, loop1MBB
  BB = loop2MBB;
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
      .addReg(stxr_status).addMBB(loop1MBB);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}
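
// The overall shape for a word-sized cmpxchg is then roughly:
//   loop1: ldxr  wDest, [xPtr]
//          cmp   wDest, wOldval
//          b.ne  exit
//   loop2: stxr  wStatus, wNewval, [xPtr]
//          cbnz  wStatus, loop1
//   exit: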

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
  //     str q0, [sp]
  //     b.ne IfTrue
  //     b Finish
  // IfTrue:
  //     str q1, [sp]
  // Finish:
  //     ldr q0, [sp]
  //
  // Using virtual registers would probably not be beneficial since COPY
  // instructions are expensive for f128 (there's no actual instruction to
  // implement them).
  //
  // An alternative would be to do an integer-CSEL on some address. E.g.:
  //     mov x0, sp
  //     add x1, sp, #16
  //     str q0, [x0]
  //     str q1, [x1]
  //     csel x0, x0, x1, ne
  //     ldr q0, [x0]
  //
  // It's unclear which approach is actually optimal.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction::iterator It = MBB;
  ++It;

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned IfTrueReg = MI->getOperand(1).getReg();
  unsigned IfFalseReg = MI->getOperand(2).getReg();
  unsigned CondCode = MI->getOperand(3).getImm();
  bool NZCVKilled = MI->getOperand(4).isKill();

  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer the rest of the current basic-block to EndBB.
  EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  // We need somewhere to store the f128 value needed.
  int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);

  // [... start of incoming MBB ...]
  //     str qIFFALSE, [sp]
  //     b.cc IfTrue
  //     b Done
  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
      .addReg(IfFalseReg)
      .addFrameIndex(ScratchFI)
      .addImm(0);
  BuildMI(MBB, DL, TII->get(AArch64::Bcc))
      .addImm(CondCode)
      .addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::Bimm))
      .addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  if (!NZCVKilled) {
    // NZCV is live-through TrueBB.
    TrueBB->addLiveIn(AArch64::NZCV);
    EndBB->addLiveIn(AArch64::NZCV);
  }

  // IfTrue:
  //     str qIFTRUE, [sp]
  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
      .addReg(IfTrueReg)
      .addFrameIndex(ScratchFI)
      .addImm(0);

  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
  // blocks.
  TrueBB->addSuccessor(EndBB);

  // Done:
  //     ldr qDEST, [sp]
  //     [... rest of incoming MBB ...]
  MachineInstr *StartOfEnd = EndBB->begin();
  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
      .addFrameIndex(ScratchFI)
      .addImm(0);

  MI->eraseFromParent();
  return EndBB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unhandled instruction with custom inserter");
  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, MBB);
  case AArch64::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);

  case AArch64::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);

  case AArch64::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);

  case AArch64::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);

  case AArch64::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);

  case AArch64::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);

  case AArch64::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);

  case AArch64::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);

  case AArch64::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);

  case AArch64::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);

  case AArch64::ATOMIC_SWAP_I8:
    return emitAtomicBinary(MI, MBB, 1, 0);
  case AArch64::ATOMIC_SWAP_I16:
    return emitAtomicBinary(MI, MBB, 2, 0);
  case AArch64::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, MBB, 4, 0);
  case AArch64::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, MBB, 8, 0);

  case AArch64::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwap(MI, MBB, 1);
  case AArch64::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwap(MI, MBB, 2);
  case AArch64::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, MBB, 4);
  case AArch64::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, MBB, 8);
  }
}

const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case AArch64ISD::BR_CC:          return "AArch64ISD::BR_CC";
  case AArch64ISD::Call:           return "AArch64ISD::Call";
  case AArch64ISD::FPMOV:          return "AArch64ISD::FPMOV";
  case AArch64ISD::GOTLoad:        return "AArch64ISD::GOTLoad";
  case AArch64ISD::BFI:            return "AArch64ISD::BFI";
  case AArch64ISD::EXTR:           return "AArch64ISD::EXTR";
  case AArch64ISD::Ret:            return "AArch64ISD::Ret";
  case AArch64ISD::SBFX:           return "AArch64ISD::SBFX";
  case AArch64ISD::SELECT_CC:      return "AArch64ISD::SELECT_CC";
  case AArch64ISD::SETCC:          return "AArch64ISD::SETCC";
  case AArch64ISD::TC_RETURN:      return "AArch64ISD::TC_RETURN";
  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
  case AArch64ISD::TLSDESCCALL:    return "AArch64ISD::TLSDESCCALL";
  case AArch64ISD::WrapperLarge:   return "AArch64ISD::WrapperLarge";
  case AArch64ISD::WrapperSmall:   return "AArch64ISD::WrapperSmall";

  case AArch64ISD::NEON_MOVIMM:
    return "AArch64ISD::NEON_MOVIMM";
  case AArch64ISD::NEON_MVNIMM:
    return "AArch64ISD::NEON_MVNIMM";
  case AArch64ISD::NEON_FMOVIMM:
    return "AArch64ISD::NEON_FMOVIMM";
  case AArch64ISD::NEON_CMP:
    return "AArch64ISD::NEON_CMP";
  case AArch64ISD::NEON_CMPZ:
    return "AArch64ISD::NEON_CMPZ";
  case AArch64ISD::NEON_TST:
    return "AArch64ISD::NEON_TST";
  case AArch64ISD::NEON_QSHLs:
    return "AArch64ISD::NEON_QSHLs";
  case AArch64ISD::NEON_QSHLu:
    return "AArch64ISD::NEON_QSHLu";
  case AArch64ISD::NEON_VDUP:
    return "AArch64ISD::NEON_VDUP";
  case AArch64ISD::NEON_VDUPLANE:
    return "AArch64ISD::NEON_VDUPLANE";
  case AArch64ISD::NEON_REV16:
    return "AArch64ISD::NEON_REV16";
  case AArch64ISD::NEON_REV32:
    return "AArch64ISD::NEON_REV32";
  case AArch64ISD::NEON_REV64:
    return "AArch64ISD::NEON_REV64";
  case AArch64ISD::NEON_UZP1:
    return "AArch64ISD::NEON_UZP1";
  case AArch64ISD::NEON_UZP2:
    return "AArch64ISD::NEON_UZP2";
  case AArch64ISD::NEON_ZIP1:
    return "AArch64ISD::NEON_ZIP1";
  case AArch64ISD::NEON_ZIP2:
    return "AArch64ISD::NEON_ZIP2";
  case AArch64ISD::NEON_TRN1:
    return "AArch64ISD::NEON_TRN1";
  case AArch64ISD::NEON_TRN2:
    return "AArch64ISD::NEON_TRN2";
  case AArch64ISD::NEON_LD1_UPD:
    return "AArch64ISD::NEON_LD1_UPD";
  case AArch64ISD::NEON_LD2_UPD:
    return "AArch64ISD::NEON_LD2_UPD";
  case AArch64ISD::NEON_LD3_UPD:
    return "AArch64ISD::NEON_LD3_UPD";
  case AArch64ISD::NEON_LD4_UPD:
    return "AArch64ISD::NEON_LD4_UPD";
  case AArch64ISD::NEON_ST1_UPD:
    return "AArch64ISD::NEON_ST1_UPD";
  case AArch64ISD::NEON_ST2_UPD:
    return "AArch64ISD::NEON_ST2_UPD";
  case AArch64ISD::NEON_ST3_UPD:
    return "AArch64ISD::NEON_ST3_UPD";
  case AArch64ISD::NEON_ST4_UPD:
    return "AArch64ISD::NEON_ST4_UPD";
  case AArch64ISD::NEON_LD1x2_UPD:
    return "AArch64ISD::NEON_LD1x2_UPD";
  case AArch64ISD::NEON_LD1x3_UPD:
    return "AArch64ISD::NEON_LD1x3_UPD";
  case AArch64ISD::NEON_LD1x4_UPD:
    return "AArch64ISD::NEON_LD1x4_UPD";
  case AArch64ISD::NEON_ST1x2_UPD:
    return "AArch64ISD::NEON_ST1x2_UPD";
  case AArch64ISD::NEON_ST1x3_UPD:
    return "AArch64ISD::NEON_ST1x3_UPD";
  case AArch64ISD::NEON_ST1x4_UPD:
    return "AArch64ISD::NEON_ST1x4_UPD";
  case AArch64ISD::NEON_LD2DUP:
    return "AArch64ISD::NEON_LD2DUP";
  case AArch64ISD::NEON_LD3DUP:
    return "AArch64ISD::NEON_LD3DUP";
  case AArch64ISD::NEON_LD4DUP:
    return "AArch64ISD::NEON_LD4DUP";
  case AArch64ISD::NEON_LD2DUP_UPD:
    return "AArch64ISD::NEON_LD2DUP_UPD";
  case AArch64ISD::NEON_LD3DUP_UPD:
    return "AArch64ISD::NEON_LD3DUP_UPD";
  case AArch64ISD::NEON_LD4DUP_UPD:
    return "AArch64ISD::NEON_LD4DUP_UPD";
  case AArch64ISD::NEON_LD2LN_UPD:
    return "AArch64ISD::NEON_LD2LN_UPD";
  case AArch64ISD::NEON_LD3LN_UPD:
    return "AArch64ISD::NEON_LD3LN_UPD";
  case AArch64ISD::NEON_LD4LN_UPD:
    return "AArch64ISD::NEON_LD4LN_UPD";
  case AArch64ISD::NEON_ST2LN_UPD:
    return "AArch64ISD::NEON_ST2LN_UPD";
  case AArch64ISD::NEON_ST3LN_UPD:
    return "AArch64ISD::NEON_ST3LN_UPD";
  case AArch64ISD::NEON_ST4LN_UPD:
    return "AArch64ISD::NEON_ST4LN_UPD";
  case AArch64ISD::NEON_VEXTRACT:
    return "AArch64ISD::NEON_VEXTRACT";

  default:
    return NULL;
  }
}

static const MCPhysReg AArch64FPRArgRegs[] = {
  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
};
static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);

static const MCPhysReg AArch64ArgRegs[] = {
  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
};
static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);

static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Mark all remaining general purpose registers as allocated. We don't
  // backtrack: if (for example) an i128 gets put on the stack, no subsequent
  // i64 will go in registers (C.11).
  for (unsigned i = 0; i < NumArgRegs; ++i)
    State.AllocateReg(AArch64ArgRegs[i]);

  return false;
}

#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
  switch (CC) {
  default: llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return CC_A64_APCS;
  }
}

void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                           SDLoc DL, SDValue &Chain) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  AArch64MachineFunctionInfo *FuncInfo =
      MF.getInfo<AArch64MachineFunctionInfo>();

  SmallVector<SDValue, 8> MemOps;

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
                                                         NumArgRegs);
  unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
                                                         NumFPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);

    SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());

    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                   MachinePointerInfo::getStack(i * 8),
                                   false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }
  }

  if (getSubtarget()->hasFPARMv8()) {
    unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
    int FPRIdx = 0;
    // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
    // can omit a register save area if we know we'll never use registers of
    // the appropriate type.
    if (FPRSaveSize != 0) {
      FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);

      SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());

      for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
        unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
                                     &AArch64::FPR128RegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
        SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                     MachinePointerInfo::getStack(i * 16),
                                     false, false, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                          DAG.getConstant(16, getPointerTy()));
      }
    }
    FuncInfo->setVariadicFPRIdx(FPRIdx);
    FuncInfo->setVariadicFPRSize(FPRSaveSize);
  }

  unsigned StackOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), 8);
  int StackIdx = MFI->CreateFixedObject(8, StackOffset, true);

  FuncInfo->setVariadicStackIdx(StackIdx);
  FuncInfo->setVariadicGPRIdx(GPRIdx);
  FuncInfo->setVariadicGPRSize(GPRSaveSize);

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                        MemOps.size());
  }
}

SDValue
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
                                            CallingConv::ID CallConv,
                                            bool isVarArg,
                                            const SmallVectorImpl<ISD::InputArg> &Ins,
                                            SDLoc dl, SelectionDAG &DAG,
                                            SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo =
      MF.getInfo<AArch64MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));

  SmallVector<SDValue, 16> ArgValues;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;

    if (Flags.isByVal()) {
      // Byval is used for small structs and HFAs in the PCS, but the system
      // should work in a non-compliant manner for larger structs.
      EVT PtrTy = getPointerTy();
      int Size = Flags.getByValSize();
      unsigned NumRegs = (Size + 7) / 8;

      uint32_t BEAlign = 0;
      if (Size < 8 && !getSubtarget()->isLittle())
        BEAlign = 8 - Size;
      unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
                                                 VA.getLocMemOffset() + BEAlign,
                                                 false);
      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
      InVals.push_back(FrameIdxN);

      continue;
    } else if (VA.isRegLoc()) {
      MVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);

      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
    } else { // VA.isMemLoc()
      assert(VA.isMemLoc());

      int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
    case CCValAssign::FPExt: {
      unsigned DestSize = VA.getValVT().getSizeInBits();
      unsigned DestSubReg;

      switch (DestSize) {
      case 8: DestSubReg = AArch64::sub_8; break;
      case 16: DestSubReg = AArch64::sub_16; break;
      case 32: DestSubReg = AArch64::sub_32; break;
      case 64: DestSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                            VA.getValVT(), ArgValue,
                                            DAG.getTargetConstant(DestSubReg,
                                                                  MVT::i32)),
                         0);
      break;
    }
    }

    InVals.push_back(ArgValue);
  }

  if (isVarArg)
    SaveVarArgRegisters(CCInfo, DAG, dl, Chain);

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes
    // in any case:
    StackArgSize = RoundUpToAlignment(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackArgSize);

    // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
  }
  // Even if we're not expected to free up the space, it's useful to know how
  // much is there while considering tail calls (because we can reuse it).
  FuncInfo->setBytesInStackArgArea(StackArgSize);

  return Chain;
}

SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // PCS: "If the type, T, of the result of a function is such that
    // void func(T arg) would require that arg be passed as a value in a
    // register (or set of registers) according to the rules in 5.4, then the
    // result is returned in the same registers as would be used for such an
    // argument.
    //
    // Otherwise, the caller shall reserve a block of memory of sufficient
    // size and alignment to hold the result. The address of the memory block
    // shall be passed as an additional argument to the function in x8."
    //
    // This is implemented in two places. The register-return values are dealt
    // with here; more complex returns are passed as an sret parameter, which
    // means we don't have to worry about it during actual return.
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");

    SDValue Arg = OutVals[i];

    // There's no convenient note in the ABI about this as there is for normal
    // arguments, but it says return values are passed in the same registers as
    // an argument would be. I believe that includes the comments about
    // unspecified higher bits, putting the burden of widening on the *caller*
    // for return values.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
      // Floating-point values should only be extended when they're going into
      // memory, which can't happen here so an integer extend is acceptable.
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}
unsigned AArch64TargetLowering::getByValTypeAlignment(Type *Ty) const {
  // This is a new backend. For anything more precise than this a FE should
  // set an explicit alignment.
  return 4;
}
SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
  bool IsSibCall = false;

  if (IsTailCall) {
    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                     IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                     Outs, OutVals, Ins, DAG);

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;
  }
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));

  // On AArch64 (and all other architectures I'm aware of) the most this has to
  // do is adjust the stack pointer.
  unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);

  if (IsSibCall) {
    // Since we're not changing the ABI to make this a tail call, the memory
    // operands are already available in the caller's incoming argument space.
    NumBytes = 0;
  }

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
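  // (Worked example, not from the source: if our incoming argument area holds
  // 32 bytes but this tail call needs 48 bytes of stack arguments, then
  // FPDiff = 32 - 48 = -16 and the stack must grow by 16 bytes.)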
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started
    // at a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                                 dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
                                        getPointerTy());

  SmallVector<SDValue, 8> MemOpChains;
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    SDValue Arg = OutVals[i];

    // Callee does the actual widening, so all extensions just use an implicit
    // definition of the rest of the Loc. Aesthetically, this would be nicer as
    // an ANY_EXTEND, but that isn't valid for floating-point types and this
    // alternative works on integer types too.
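    // (e.g. an i8 argument travels as the low 8 bits of a w-register: the
    // value is inserted into sub_8 of an otherwise undefined 32-bit value.)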
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
    case CCValAssign::FPExt: {
      unsigned SrcSize = VA.getValVT().getSizeInBits();
      unsigned SrcSubReg;

      switch (SrcSize) {
      case 8: SrcSubReg = AArch64::sub_8; break;
      case 16: SrcSubReg = AArch64::sub_16; break;
      case 32: SrcSubReg = AArch64::sub_32; break;
      case 64: SrcSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                       VA.getLocVT(),
                                       DAG.getUNDEF(VA.getLocVT()),
                                       Arg,
                                       DAG.getTargetConstant(SrcSubReg,
                                                             MVT::i32)),
                    0);
      break;
    }
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }
    if (VA.isRegLoc()) {
      // A normal register (sub-) argument. For now we just note it down because
      // we want to copy things into registers as late as possible to avoid
      // register-pressure (and possibly worse).
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc() && "unexpected argument location");

    SDValue DstAddr;
    MachinePointerInfo DstInfo;
    if (IsTailCall) {
      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize()*8 :
                                          VA.getLocVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);

      DstAddr = DAG.getFrameIndex(FI, getPointerTy());
      DstInfo = MachinePointerInfo::getFixedStack(FI);

      // Make sure any stack arguments overlapping with where we're storing are
      // loaded before this eventual operation. Otherwise they'll be clobbered.
      Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
    } else {
      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize()*8 :
                                          VA.getLocVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      uint32_t BEAlign = 0;
      if (OpSize < 8 && !getSubtarget()->isLittle())
        BEAlign = 8 - OpSize;
      SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + BEAlign);

      DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
      DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
    }

    if (Flags.isByVal()) {
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
      SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
                                  Flags.getByValAlign(),
                                  /*isVolatile = */ false,
                                  /*alwaysInline = */ false,
                                  DstInfo, MachinePointerInfo());
      MemOpChains.push_back(Cpy);
    } else {
      // Normal stack argument, put it where it's needed.
      SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
                                   false, false, 0);
      MemOpChains.push_back(Store);
    }
  }
  // The loads and stores generated above shouldn't clash with each
  // other. Combining them with this TokenFactor notes that fact for the rest
  // of the compilation.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Most of the rest of the instructions need to be glued together; we don't
  // want assignments to actual registers used by a call to be rearranged by a
  // well-meaning scheduler.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }
  // The linker is responsible for inserting veneers when necessary to put a
  // function call destination in range, so we don't need to bother with a
  // wrapper here.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();
    Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call, however in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when sp is reset they'll
  // be in the correct location.
  if (IsTailCall && !IsSibCall) {
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(0, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }
  // We produce the following DAG scheme for the actual call instruction:
  //     (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
  //
  // Most arguments aren't going to be used and just keep the values live as
  // far as LLVM is concerned. It's expected to be selected as simply "bl
  // callee" (for a direct, non-tail call).
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall) {
    // Each tail call may have to adjust the stack by a different amount, so
    // this information must travel along with the operation for eventual
    // consumption by emitEpilogue.
    Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
  }

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  // This is used later in codegen to constrain register-allocation.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If we needed glue, put it in as the last argument.
  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0],
                       Ops.size());

  Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Now we can reclaim the stack, and may as well do it before working out
  // where our return value is.
  if (!IsSibCall) {
    uint64_t CalleePopBytes
      = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;

    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(CalleePopBytes, true),
                               InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  return LowerCallResult(Chain, InFlag, CallConv,
                         IsVarArg, Ins, dl, DAG, InVals);
}
SDValue
AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                       CallingConv::ID CallConv, bool IsVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                       SDLoc dl, SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Return values that are too big to fit into registers should use an sret
    // pointer, so this can be a lot simpler than the main argument code.
    assert(VA.isRegLoc() && "Memory locations not expected for call return");

    SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                                     InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
    case CCValAssign::SExt:
    case CCValAssign::AExt:
      // Floating-point arguments only get extended/truncated if they're going
      // in memory, so using the integer operation is acceptable here.
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
bool
AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    bool IsVarArg,
                                    bool IsCalleeStructRet,
                                    bool IsCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  // For CallingConv::C this function knows whether the ABI needs
  // changing. That's not true for other conventions so they will have to opt
  // in manually.
  if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
    return false;

  const MachineFunction &MF = DAG.getMachineFunction();
  const Function *CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible (see
  // X86) but less efficient and uglier in LowerCall.
  for (Function::const_arg_iterator i = CallerF->arg_begin(),
         e = CallerF->arg_end(); i != e; ++i)
    if (i->hasByValAttr())
      return false;

  if (getTargetMachine().Options.GuaranteedTailCallOpt) {
    if (IsTailCallConvention(CalleeCC) && CCMatch)
      return true;
    return false;
  }
  // Now we search for cases where we can use a tail call without changing the
  // ABI. Sibcall is used in some places (particularly gcc) to refer to this
  // situation too.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!IsVarArg || CalleeCC == CallingConv::C)
         && "Unexpected variadic calling convention");

  if (IsVarArg && !Outs.empty()) {
    // At least two cases here: if caller is fastcc then we can't have any
    // memory arguments (we'd be expected to clean up the stack afterwards). If
    // caller is C then we could potentially use its argument area.

    // FIXME: for now we take the most conservative of these in both cases:
    // disallow all variadic memory operands.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
                   getTargetMachine(), ArgLocs, *DAG.getContext());

    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }
  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }
  // Nothing more to check if the callee is taking no arguments.
  if (Outs.empty())
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));

  const AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  // If the stack arguments for this call would fit into our own save area then
  // the call can be made tail.
  return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
}
bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
  return CallCC == CallingConv::Fast && TailCallOpt;
}

bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
  return CallCC == CallingConv::Fast;
}
SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo *MFI,
                                                   int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument corresponding to this call.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
         UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;
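          // Two byte ranges [FirstByte, LastByte] and [InFirstByte,
          // InLastByte] overlap iff either range's start lies inside the
          // other range.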
          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
                     &ArgChains[0], ArgChains.size());
}
static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETEQ:  return A64CC::EQ;
  case ISD::SETGT:  return A64CC::GT;
  case ISD::SETGE:  return A64CC::GE;
  case ISD::SETLT:  return A64CC::LT;
  case ISD::SETLE:  return A64CC::LE;
  case ISD::SETNE:  return A64CC::NE;
  case ISD::SETUGT: return A64CC::HI;
  case ISD::SETUGE: return A64CC::HS;
  case ISD::SETULT: return A64CC::LO;
  case ISD::SETULE: return A64CC::LS;
  default: llvm_unreachable("Unexpected condition code");
  }
}
bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
  // icmp is implemented using adds/subs immediate, which take an unsigned
  // 12-bit immediate, optionally shifted left by 12 bits.
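  // (For example, 0xabc and 0xabc000 are both encodable, but 0xabc00 is not:
  // it would need a shift of 8, which the instructions cannot express.)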
  // Symmetric by using adds/subs.
  if (Val < 0)
    Val = -Val;

  return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
}
SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
                                                     ISD::CondCode CC,
                                                     SDValue &A64cc,
                                                     SelectionDAG &DAG,
                                                     SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    int64_t C = 0;
    EVT VT = RHSC->getValueType(0);
    bool knownInvalid = false;

    // I'm not convinced the rest of LLVM handles these edge cases properly, but
    // we can at least get it right.
    if (isSignedIntSetCC(CC)) {
      C = RHSC->getSExtValue();
    } else if (RHSC->getZExtValue() > INT64_MAX) {
      // A 64-bit constant not representable by a signed 64-bit integer is far
      // too big to fit into a SUBS immediate anyway.
      knownInvalid = true;
    } else {
      C = RHSC->getZExtValue();
    }

    if (!knownInvalid && !isLegalICmpImmediate(C)) {
      // Constant does not fit, try adjusting it by one?
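      // (e.g. "x s< 0xabc001" cannot be encoded directly, but it is
      // equivalent to "x s<= 0xabc000", which can be.)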
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, VT);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, VT);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, VT);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, VT);
        }
        break;
      }
    }
  }

  A64CC::CondCodes CondCode = IntCCToA64CC(CC);
  A64cc = DAG.getConstant(CondCode, MVT::i32);
  return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                     DAG.getCondCode(CC));
}
static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
                                    A64CC::CondCodes &Alternative) {
  A64CC::CondCodes CondCode = A64CC::Invalid;
  Alternative = A64CC::Invalid;

  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = A64CC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = A64CC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = A64CC::GE; break;
  case ISD::SETOLT: CondCode = A64CC::MI; break;
  case ISD::SETOLE: CondCode = A64CC::LS; break;
  case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
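  // SETONE has no single A64 condition: MI catches "less than", the GT
  // alternative catches "greater than", and an unordered result fails both.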
  case ISD::SETO:   CondCode = A64CC::VC; break;
  case ISD::SETUO:  CondCode = A64CC::VS; break;
  case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
  case ISD::SETUGT: CondCode = A64CC::HI; break;
  case ISD::SETUGE: CondCode = A64CC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = A64CC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = A64CC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = A64CC::NE; break;
  }
  return CondCode;
}
SDValue
AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  switch(getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
    // The most efficient code is PC-relative anyway for the small memory
    // model, so we don't need to worry about relocation model.
    return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                       DAG.getTargetBlockAddress(BA, PtrVT, 0,
                                                 AArch64II::MO_NO_FLAG),
                       DAG.getTargetBlockAddress(BA, PtrVT, 0,
                                                 AArch64II::MO_LO12),
                       DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
  case CodeModel::Large:
    return DAG.getNode(
      AArch64ISD::WrapperLarge, DL, PtrVT,
      DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
      DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
      DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
      DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}
// (BRCOND chain, val, dest)
SDValue
AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue TheBit = Op.getOperand(1);
  SDValue DestBB = Op.getOperand(2);

  // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
  // that as the consumer we are responsible for ignoring rubbish in higher
  // bits.
  TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
                       DAG.getConstant(1, MVT::i32));

  SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
                               DAG.getConstant(0, TheBit.getValueType()),
                               DAG.getCondCode(ISD::SETNE));

  return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
                     A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
                     DestBB);
}
// (BR_CC chain, condcode, lhs, rhs, dest)
SDValue
AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue DestBB = Op.getOperand(4);

  if (LHS.getValueType() == MVT::f128) {
    // f128 comparisons are lowered to runtime calls by a routine which sets
    // LHS, RHS and CC appropriately for the rest of this function to continue.
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, we need to compare the result
    // against zero to select between true and false values.
    if (RHS.getNode() == 0) {
      RHS = DAG.getConstant(0, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
    // immediates and tests can get hairy and we may want to fiddle things.
    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);

    return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                       Chain, CmpOp, A64cc, DestBB);
  }

  // Note that some LLVM floating-point CondCodes can't be lowered to a single
  // conditional branch, hence FPCCToA64CC can set a second test, where either
  // passing is sufficient.
  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
  CondCode = FPCCToA64CC(CC, Alternative);
  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
  SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                              DAG.getCondCode(CC));
  SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                                 Chain, SetCC, A64cc, DestBB);

  if (Alternative != A64CC::Invalid) {
    A64cc = DAG.getConstant(Alternative, MVT::i32);
    A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                           A64BR_CC, SetCC, A64cc, DestBB);
  }

  return A64BR_CC;
}
SDValue
AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
                                       RTLIB::Libcall Call) const {
  ArgListTy Args;
  ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op.getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy;
    Entry.isSExt = false;
    Entry.isZExt = false;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());

  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());

  // By default, the input chain to this libcall is the entry node of the
  // function. If the libcall is going to be emitted as a tail call then
  // isUsedByReturnOnly will change it to the right chain if the return
  // node which is being folded has a non-entry input chain.
  SDValue InChain = DAG.getEntryNode();

  // isTailCall may be true since the callee does not reference caller stack
  // frame. Check if it's in the right position.
  SDValue TCChain = InChain;
  bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
  if (isTailCall)
    InChain = TCChain;

  CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
                       0, getLibcallCallingConv(Call), isTailCall,
                       /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
                       Callee, Args, DAG, SDLoc(Op));
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  if (!CallInfo.second.getNode())
    // It's a tailcall, return the chain (which is the DAG root).
    return DAG.getRoot();

  return CallInfo.first;
}
SDValue
AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved.
    return Op;
  }

  RTLIB::Libcall LC;
  LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());

  SDValue SrcVal = Op.getOperand(0);
  return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
                     /*isSigned*/ false, SDLoc(Op)).first;
}
SDValue
AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");

  RTLIB::Libcall LC;
  LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                    bool IsSigned) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  EVT OpVT = Vec.getValueType();
  unsigned Opc = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  if (VT.getVectorNumElements() == 1) {
    assert(OpVT == MVT::v1f64 && "Unexpected vector type!");
    if (VT.getSizeInBits() == OpVT.getSizeInBits())
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  if (VT.getSizeInBits() > OpVT.getSizeInBits()) {
    assert(Vec.getValueType() == MVT::v2f32 && VT == MVT::v2i64 &&
           "Unexpected vector type!");
    Vec = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v2f64, Vec);
    return DAG.getNode(Opc, dl, VT, Vec);
  } else if (VT.getSizeInBits() < OpVT.getSizeInBits()) {
    EVT CastVT = EVT::getIntegerVT(*DAG.getContext(),
                                   OpVT.getVectorElementType().getSizeInBits());
    CastVT =
      EVT::getVectorVT(*DAG.getContext(), CastVT, VT.getVectorNumElements());
    Vec = DAG.getNode(Opc, dl, CastVT, Vec);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Vec);
  }

  return DAG.getNode(Opc, dl, VT, Vec);
}
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  // We custom lower concat_vectors with 4, 8, or 16 operands that are all the
  // same operand and of type v1* using the DUP instruction.
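  // (Illustrative: concat_vectors(v1i32 %x, %x, %x, %x) becomes a single DUP
  // broadcasting %x's element into every lane of a v4i32.)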
  unsigned NumOps = Op->getNumOperands();
  if (NumOps == 2) {
    assert(Op.getValueType().getSizeInBits() == 128 && "unexpected concat");
    return Op;
  }

  if (NumOps != 4 && NumOps != 8 && NumOps != 16)
    return SDValue();

  // Must be a single value for VDUP.
  SDValue Op0 = Op.getOperand(0);
  for (unsigned i = 1; i < NumOps; ++i) {
    SDValue OpN = Op.getOperand(i);
    if (Op0 != OpN)
      return SDValue();
  }

  // Verify the value type.
  EVT EltVT = Op0.getValueType();
  switch (NumOps) {
  default: llvm_unreachable("Unexpected number of operands");
  case 4:
    if (EltVT != MVT::v1i16 && EltVT != MVT::v1i32)
      return SDValue();
    break;
  case 8:
    if (EltVT != MVT::v1i8 && EltVT != MVT::v1i16)
      return SDValue();
    break;
  case 16:
    if (EltVT != MVT::v1i8)
      return SDValue();
    break;
  }

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  // VDUP produces better code for constants.
  if (Op0->getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Op0->getOperand(0));
  return DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, Op0,
                     DAG.getConstant(0, MVT::i64));
}
SDValue
AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                      bool IsSigned) const {
  if (Op.getValueType().isVector())
    return LowerVectorFP_TO_INT(Op, DAG, IsSigned);
  if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved.
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}
SDValue
AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(8, MVT::i64);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Return X30, which contains the return address. Mark it an implicit
  // live-in.
  unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64);
}
SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG)
                                              const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = AArch64::X29;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}
SDValue
AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(getTargetMachine().getCodeModel() == CodeModel::Large);
  assert(getTargetMachine().getRelocationModel() == Reloc::Static);

  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);

  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();

  SDValue GlobalAddr = DAG.getNode(
      AArch64ISD::WrapperLarge, dl, PtrVT,
      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));

  if (GN->getOffset() != 0)
    return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
                       DAG.getConstant(GN->getOffset(), PtrVT));

  return GlobalAddr;
}
SDValue
AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(getTargetMachine().getCodeModel() == CodeModel::Small);

  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);

  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  unsigned Alignment = GV->getAlignment();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
    // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
    // to zero when they remain undefined. In PIC mode the GOT can take care of
    // this, but in absolute mode we use a constant pool load.
    SDValue PoolAddr;
    PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
                           DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
                                                     AArch64II::MO_NO_FLAG),
                           DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
                                                     AArch64II::MO_LO12),
                           DAG.getConstant(8, MVT::i32));
    SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
                                     MachinePointerInfo::getConstantPool(),
                                     /*isVolatile=*/ false,
                                     /*isNonTemporal=*/ true,
                                     /*isInvariant=*/ true, 8);
    if (GN->getOffset() != 0)
      return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
                         DAG.getConstant(GN->getOffset(), PtrVT));

    return GlobalAddr;
  }
  if (Alignment == 0) {
    const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
    if (GVPtrTy->getElementType()->isSized()) {
      Alignment
        = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
    } else {
      // Be conservative if we can't guess, not that it really matters:
      // functions and labels aren't valid for loads, and the methods used to
      // actually calculate an address work with any alignment.
      Alignment = 1;
    }
  }

  unsigned char HiFixup, LoFixup;
  bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);

  if (UseGOT) {
    HiFixup = AArch64II::MO_GOT;
    LoFixup = AArch64II::MO_GOT_LO12;
    Alignment = 8;
  } else {
    HiFixup = AArch64II::MO_NO_FLAG;
    LoFixup = AArch64II::MO_LO12;
  }

  // AArch64's small model demands the following sequence:
  // ADRP x0, somewhere
  // ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
  SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                             HiFixup),
                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                             LoFixup),
                                  DAG.getConstant(Alignment, MVT::i32));

  if (UseGOT) {
    GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
                            GlobalRef);
  }

  if (GN->getOffset() != 0)
    return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
                       DAG.getConstant(GN->getOffset(), PtrVT));

  return GlobalRef;
}
SDValue
AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
                                             SelectionDAG &DAG) const {
  // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
  // we make those distinctions here.

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
    return LowerGlobalAddressELFSmall(Op, DAG);
  case CodeModel::Large:
    return LowerGlobalAddressELFLarge(Op, DAG);
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}
SDValue
AArch64TargetLowering::LowerConstantPool(SDValue Op,
                                         SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy();
  ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CN->getConstVal();

  switch(getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
    // The most efficient code is PC-relative anyway for the small memory
    // model, so we don't need to worry about relocation model.
    return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                       DAG.getTargetConstantPool(C, PtrVT, 0, 0,
                                                 AArch64II::MO_NO_FLAG),
                       DAG.getTargetConstantPool(C, PtrVT, 0, 0,
                                                 AArch64II::MO_LO12),
                       DAG.getConstant(CN->getAlignment(), MVT::i32));
  case CodeModel::Large:
    return DAG.getNode(
      AArch64ISD::WrapperLarge, DL, PtrVT,
      DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
      DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
      DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
      DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC));
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}
SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
                                                SDValue DescAddr,
                                                SDLoc DL,
                                                SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy();

  // The function we need to call is simply the first entry in the GOT for this
  // descriptor, load it in preparation.
  SDValue Func, Chain;
  Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                     SymAddr);

  // The function takes only one argument: the address of the descriptor itself
  // in X0.
  SDValue Glue;
  Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
  Glue = Chain.getValue(1);

  // Finally, there's a special calling-convention which means that the lookup
  // must preserve all registers (except X0, obviously).
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const AArch64RegisterInfo *A64RI
    = static_cast<const AArch64RegisterInfo *>(TRI);
  const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();

  // We're now ready to populate the argument list, as with a normal call:
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Func);
  Ops.push_back(SymAddr);
  Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
  Ops.push_back(DAG.getRegisterMask(Mask));
  Ops.push_back(Glue);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
                      Ops.size());
  Glue = Chain.getValue(1);

  // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass
  // it back to the generic handling code.
  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
}
SDValue
AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  assert(getSubtarget()->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  assert(getTargetMachine().getCodeModel() == CodeModel::Small
         && "TLS only supported in small memory model");
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());

  SDValue TPOff;
  EVT PtrVT = getPointerTy();
  SDLoc DL(Op);
  const GlobalValue *GV = GA->getGlobal();

  SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);

  if (Model == TLSModel::InitialExec) {
    TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                   AArch64II::MO_GOTTPREL),
                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                   AArch64II::MO_GOTTPREL_LO12),
                        DAG.getConstant(8, MVT::i32));
    TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                        TPOff);
  } else if (Model == TLSModel::LocalExec) {
    SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_TPREL_G1);
    SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_TPREL_G0_NC);
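    // i.e. materialize the offset from TPIDR_EL0 directly:
    //   movz x0, #:tprel_g1:var, lsl #16
    //   movk x0, #:tprel_g0_nc:var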
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(1, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
                                       TPOff, LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
  } else if (Model == TLSModel::GeneralDynamic) {
    // Accesses used in this sequence go via the TLS descriptor which lives in
    // the GOT. Prepare an address we can use to handle this.
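    //
    // (Under the small code model the emitted sequence is roughly:
    //    adrp x0, :tlsdesc:var
    //    ldr  x1, [x0, #:tlsdesc_lo12:var]
    //    add  x0, x0, #:tlsdesc_lo12:var
    //    .tlsdesccall var
    //    blr  x1
    //  leaving var's offset from TPIDR_EL0 in x0.)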
    SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                AArch64II::MO_TLSDESC);
    SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
    SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);

    TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
  } else if (Model == TLSModel::LocalDynamic) {
    // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
    // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
    // the beginning of the module's TLS region, followed by a DTPREL offset
    // calculation.

    // These accesses will need deduplicating if there's more than one.
    AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
      .getInfo<AArch64MachineFunctionInfo>();
    MFI->incNumLocalDynamicTLSAccesses();

    // Get the location of _TLS_MODULE_BASE_:
    SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
                                                 AArch64II::MO_TLSDESC);
    SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
                                                 AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
    SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);

    ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);

    // Get the variable's offset from _TLS_MODULE_BASE_:
    SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_DTPREL_G1);
    SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_DTPREL_G0_NC);

    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
                                       TPOff, LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
  } else
    llvm_unreachable("Unsupported TLS access model");

  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
}
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                                    bool IsSigned) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  unsigned Opc = IsSigned ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;

  if (VT.getVectorNumElements() == 1) {
    assert(VT == MVT::v1f64 && "Unexpected vector type!");
    if (VT.getSizeInBits() == Vec.getValueSizeInBits())
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  if (VT.getSizeInBits() < Vec.getValueSizeInBits()) {
    assert(Vec.getValueType() == MVT::v2i64 && VT == MVT::v2f32 &&
           "Unexpected vector type!");
    Vec = DAG.getNode(Opc, dl, MVT::v2f64, Vec);
    return DAG.getNode(ISD::FP_ROUND, dl, VT, Vec, DAG.getIntPtrConstant(0));
  } else if (VT.getSizeInBits() > Vec.getValueSizeInBits()) {
    unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    EVT CastVT = EVT::getIntegerVT(*DAG.getContext(),
                                   VT.getVectorElementType().getSizeInBits());
    CastVT =
      EVT::getVectorVT(*DAG.getContext(), CastVT, VT.getVectorNumElements());
    Vec = DAG.getNode(CastOpc, dl, CastVT, Vec);
  }

  return DAG.getNode(Opc, dl, VT, Vec);
}
SDValue
AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                                      bool IsSigned) const {
  if (Op.getValueType().isVector())
    return LowerVectorINT_TO_FP(Op, DAG, IsSigned);
  if (Op.getValueType() != MVT::f128) {
    // Legal for everything except f128.
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}
SDValue
AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy();

  // When compiling PIC, jump tables get put in the code section so a static
  // relocation-style is acceptable for both cases.
  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
    return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
                       DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
                       DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                              AArch64II::MO_LO12),
                       DAG.getConstant(1, MVT::i32));
  case CodeModel::Large:
    return DAG.getNode(
      AArch64ISD::WrapperLarge, dl, PtrVT,
      DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
      DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
      DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
      DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}
// (SELECT testbit, iftrue, iffalse)
SDValue
AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue TheBit = Op.getOperand(0);
  SDValue IfTrue = Op.getOperand(1);
  SDValue IfFalse = Op.getOperand(2);

  // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
  // that as the consumer we are responsible for ignoring rubbish in higher
  // bits.
  TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
                       DAG.getConstant(1, MVT::i32));
  SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
                               DAG.getConstant(0, TheBit.getValueType()),
                               DAG.getCondCode(ISD::SETNE));

  return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
                     A64CMP, IfTrue, IfFalse,
                     DAG.getConstant(A64CC::NE, MVT::i32));
}
static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  EVT VT = Op.getValueType();
  bool Invert = false;
  SDValue Op0, Op1;
  unsigned Opcode;

  if (LHS.getValueType().isInteger()) {

    // Attempt to use Vector Integer Compare Mask Test instruction.
    // TST = icmp ne (and (op0, op1), zero).
    if (CC == ISD::SETNE) {
      if (((LHS.getOpcode() == ISD::AND) &&
           ISD::isBuildVectorAllZeros(RHS.getNode())) ||
          ((RHS.getOpcode() == ISD::AND) &&
           ISD::isBuildVectorAllZeros(LHS.getNode()))) {

        SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS;
        SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0));
        SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1));
        return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS);
      }
    }

    // Attempt to use Vector Integer Compare Mask against Zero instr (Signed).
    // Note: Compare against Zero does not support unsigned predicates.
    if ((ISD::isBuildVectorAllZeros(RHS.getNode()) ||
         ISD::isBuildVectorAllZeros(LHS.getNode())) &&
        !isUnsignedIntSetCC(CC)) {

      // If LHS is the zero value, swap operands and CondCode.
      if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
        CC = getSetCCSwappedOperands(CC);
        Op0 = RHS;
      } else
        Op0 = LHS;

      // Ensure valid CondCode for Compare Mask against Zero instruction:
      // EQ, GE, GT, LE, LT.
      if (ISD::SETNE == CC) {
        Invert = true;
        CC = ISD::SETEQ;
      }

      // Using constant type to differentiate integer and FP compares with zero.
      Op1 = DAG.getConstant(0, MVT::i32);
      Opcode = AArch64ISD::NEON_CMPZ;
    } else {
      // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
      // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
      bool Swap = false;
      switch (CC) {
      default:
        llvm_unreachable("Illegal integer comparison.");
      case ISD::SETEQ:
      case ISD::SETGT:
      case ISD::SETGE:
      case ISD::SETUGT:
      case ISD::SETUGE:
        break;
      case ISD::SETNE:
        Invert = true;
        CC = ISD::SETEQ;
        break;
      case ISD::SETULT:
      case ISD::SETULE:
      case ISD::SETLT:
      case ISD::SETLE:
        Swap = true;
        CC = getSetCCSwappedOperands(CC);
        break;
      }

      if (Swap)
        std::swap(LHS, RHS);

      Opcode = AArch64ISD::NEON_CMP;
      Op0 = LHS;
      Op1 = RHS;
    }

    // Generate Compare Mask instr or Compare Mask against Zero instr.
    SDValue NeonCmp =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));

    if (Invert)
      NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

    return NeonCmp;
  }
  // Now handle Floating Point cases.
  // Attempt to use Vector Floating Point Compare Mask against Zero
  // instruction.
  if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
      ISD::isBuildVectorAllZeros(LHS.getNode())) {

    // If LHS is the zero value, swap operands and CondCode.
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      CC = getSetCCSwappedOperands(CC);
      Op0 = RHS;
    } else
      Op0 = LHS;

    // Using constant type to differentiate integer and FP compares with zero.
    Op1 = DAG.getConstantFP(0, MVT::f32);
    Opcode = AArch64ISD::NEON_CMPZ;
  } else {
    // Attempt to use Vector Floating Point Compare Mask instruction.
    Op0 = LHS;
    Op1 = RHS;
    Opcode = AArch64ISD::NEON_CMP;
  }

  SDValue NeonCmpAlt;
  // Some register compares have to be implemented with swapped CC and
  // operands, e.g.: OLT implemented as OGT with swapped operands.
  bool SwapIfRegArgs = false;

  // Ensure valid CondCode for FP Compare Mask against Zero instruction:
  // EQ, GE, GT, LE, LT.
  // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
  switch (CC) {
  default:
    llvm_unreachable("Illegal FP comparison");
  case ISD::SETUNE:
  case ISD::SETNE:
    Invert = true; // Fallthrough
  case ISD::SETOEQ:
  case ISD::SETEQ:
    CC = ISD::SETEQ;
    break;
  case ISD::SETOLT:
  case ISD::SETLT:
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETOGT:
  case ISD::SETGT:
    CC = ISD::SETGT;
    break;
  case ISD::SETOLE:
  case ISD::SETLE:
    CC = ISD::SETLE;
    SwapIfRegArgs = true;
    break;
  case ISD::SETOGE:
  case ISD::SETGE:
    CC = ISD::SETGE;
    break;
  case ISD::SETUGE:
    Invert = true;
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETULE:
    Invert = true;
    CC = ISD::SETGT;
    break;
  case ISD::SETUGT:
    Invert = true;
    CC = ISD::SETLE;
    SwapIfRegArgs = true;
    break;
  case ISD::SETULT:
    Invert = true;
    CC = ISD::SETGE;
    break;
  case ISD::SETUEQ:
    Invert = true; // Fallthrough
  case ISD::SETONE:
    // Expand this to (OGT | OLT).
    NeonCmpAlt =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETUO:
    Invert = true; // Fallthrough
  case ISD::SETO:
    // Expand this to (OGE | OLT).
    NeonCmpAlt =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  }

  if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
    CC = getSetCCSwappedOperands(CC);
    std::swap(Op0, Op1);
  }

  // Generate FP Compare Mask instr or FP Compare Mask against Zero instr.
  SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));

  if (NeonCmpAlt.getNode())
    NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);

  if (Invert)
    NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

  return NeonCmp;
}
// (SETCC lhs, rhs, condcode)
SDValue
AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  EVT VT = Op.getValueType();

  if (VT.isVector())
    return LowerVectorSETCC(Op, DAG);

  if (LHS.getValueType() == MVT::f128) {
    // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
    // for the rest of the function (some i32 or i64 values).
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, use it.
    if (RHS.getNode() == 0) {
      assert(LHS.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      return LHS;
    }
  }

  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
    // immediates and tests can get hairy and we may want to fiddle things.
    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);

    return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
                       CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       A64cc);
  }

  // Note that some LLVM floating-point CondCodes can't be lowered to a single
  // conditional branch, hence FPCCToA64CC can set a second test, where either
  // passing is sufficient.
  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
  CondCode = FPCCToA64CC(CC, Alternative);
  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
  SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                              DAG.getCondCode(CC));
  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
                                     CmpOp, DAG.getConstant(1, VT),
                                     DAG.getConstant(0, VT), A64cc);

  if (Alternative != A64CC::Invalid) {
    A64cc = DAG.getConstant(Alternative, MVT::i32);
    A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
                               DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
  }

  return A64SELECT_CC;
}
static SDValue LowerVectorSELECT_CC(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue IfTrue = Op.getOperand(2);
  SDValue IfFalse = Op.getOperand(3);
  EVT IfTrueVT = IfTrue.getValueType();
  EVT CondVT = IfTrueVT.changeVectorElementTypeToInteger();
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  // If LHS & RHS are floating point and IfTrue & IfFalse are vectors, we will
  // use NEON compare.
  if ((LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64)) {
    EVT EltVT = LHS.getValueType();
    unsigned EltNum = 128 / EltVT.getSizeInBits();
    EVT VT = EVT::getVectorVT(*DAG.getContext(), EltVT, EltNum);
    unsigned SubConstant =
      (LHS.getValueType() == MVT::f32) ? AArch64::sub_32 : AArch64::sub_64;
    EVT CEltT = (LHS.getValueType() == MVT::f32) ? MVT::i32 : MVT::i64;
    EVT CVT = EVT::getVectorVT(*DAG.getContext(), CEltT, EltNum);

    LHS
      = SDValue(DAG.getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
                  VT, DAG.getTargetConstant(0, MVT::i32), LHS,
                  DAG.getTargetConstant(SubConstant, MVT::i32)), 0);
    RHS
      = SDValue(DAG.getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
                  VT, DAG.getTargetConstant(0, MVT::i32), RHS,
                  DAG.getTargetConstant(SubConstant, MVT::i32)), 0);

    SDValue VSetCC = DAG.getSetCC(dl, CVT, LHS, RHS, CC);
    SDValue ResCC = LowerVectorSETCC(VSetCC, DAG);
    if (CEltT.getSizeInBits() < IfTrueVT.getSizeInBits()) {
      EVT DUPVT =
        EVT::getVectorVT(*DAG.getContext(), CEltT,
                         IfTrueVT.getSizeInBits() / CEltT.getSizeInBits());
      ResCC = DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, DUPVT, ResCC,
                          DAG.getConstant(0, MVT::i64, false));

      ResCC = DAG.getNode(ISD::BITCAST, dl, CondVT, ResCC);
    } else {
      // FIXME: If IfTrue & IfFalse hold v1i8, v1i16 or v1i32, this function
      // can't handle them and will hit this assert.
      assert(CEltT.getSizeInBits() == IfTrueVT.getSizeInBits() &&
             "Vector of IfTrue & IfFalse is too small.");

      unsigned ExEltNum =
        EltNum * IfTrueVT.getSizeInBits() / ResCC.getValueSizeInBits();
      EVT ExVT = EVT::getVectorVT(*DAG.getContext(), CEltT, ExEltNum);
      ResCC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ExVT, ResCC,
                          DAG.getConstant(0, MVT::i64, false));
      ResCC = DAG.getNode(ISD::BITCAST, dl, CondVT, ResCC);
    }
    SDValue VSelect = DAG.getNode(ISD::VSELECT, dl, IfTrue.getValueType(),
                                  ResCC, IfTrue, IfFalse);
    return VSelect;
  }
  // Here we handle the case that LHS & RHS are integer and IfTrue & IfFalse
  // are vectors.
  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
  CondCode = FPCCToA64CC(CC, Alternative);
  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
  SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                              DAG.getCondCode(CC));
  EVT SEVT = MVT::i32;
  if (IfTrue.getValueType().getVectorElementType().getSizeInBits() > 32)
    SEVT = MVT::i64;
  SDValue AllOne = DAG.getConstant(-1, SEVT);
  SDValue AllZero = DAG.getConstant(0, SEVT);
  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, SEVT, SetCC,
                                     AllOne, AllZero, A64cc);

  if (Alternative != A64CC::Invalid) {
    A64cc = DAG.getConstant(Alternative, MVT::i32);
    A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
                               SetCC, AllOne, A64SELECT_CC, A64cc);
  }
  SDValue VDup;
  if (IfTrue.getValueType().getVectorNumElements() == 1)
    VDup = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, CondVT, A64SELECT_CC);
  else
    VDup = DAG.getNode(AArch64ISD::NEON_VDUP, dl, CondVT, A64SELECT_CC);
  SDValue VSelect = DAG.getNode(ISD::VSELECT, dl, IfTrue.getValueType(),
                                VDup, IfTrue, IfFalse);
  return VSelect;
}
3145 // (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
3147 AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
3149 SDValue LHS = Op.getOperand(0);
3150 SDValue RHS = Op.getOperand(1);
3151 SDValue IfTrue = Op.getOperand(2);
3152 SDValue IfFalse = Op.getOperand(3);
3153 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
3155 if (IfTrue.getValueType().isVector())
3156 return LowerVectorSELECT_CC(Op, DAG);
3158 if (LHS.getValueType() == MVT::f128) {
3159 // f128 comparisons are lowered to libcalls, but slot in nicely here
3161 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
3163 // If softenSetCCOperands returned a scalar, we need to compare the result
3164 // against zero to select between true and false values.
3165 if (RHS.getNode() == 0) {
3166 RHS = DAG.getConstant(0, LHS.getValueType());
3171 if (LHS.getValueType().isInteger()) {
3174 // Integers are handled in a separate function because the combinations of
3175 // immediates and tests can get hairy and we may want to fiddle things.
3176 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
3178 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), CmpOp,
3179 IfTrue, IfFalse, A64cc);
3182 // Note that some LLVM floating-point CondCodes can't be lowered to a single
3183 // conditional branch, hence FPCCToA64CC can set a second test, where either
3184 // passing is sufficient.
3185 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
3186 CondCode = FPCCToA64CC(CC, Alternative);
3187 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
3188 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
3189 DAG.getCondCode(CC));
3190 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
3191 Op.getValueType(),
3192 SetCC, IfTrue, IfFalse, A64cc);
3194 if (Alternative != A64CC::Invalid) {
3195 A64cc = DAG.getConstant(Alternative, MVT::i32);
3196 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
3197 SetCC, IfTrue, A64SELECT_CC, A64cc);
3201 return A64SELECT_CC;
3205 AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3206 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
3207 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
3209 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
3210 // rather than just 8.
3211 return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
3212 Op.getOperand(1), Op.getOperand(2),
3213 DAG.getConstant(32, MVT::i32), 8, false, false,
3214 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
3218 AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3219 // The layout of the va_list struct is specified in the AArch64 Procedure Call
3220 // Standard, section B.3.
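// As a sketch (the offsets match the stores below), the structure being
// initialized here is equivalent to:
//   struct va_list {
//     void *__stack;  // offset 0: next stacked argument
//     void *__gr_top; // offset 8: end of the GP register save area
//     void *__vr_top; // offset 16: end of the FP/SIMD register save area
//     int __gr_offs;  // offset 24: negative GP save bytes remaining
//     int __vr_offs;  // offset 28: negative FP/SIMD save bytes remaining
//   };
// which is also why LowerVACOPY above copies 8+8+8+4+4 = 32 bytes.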
3221 MachineFunction &MF = DAG.getMachineFunction();
3222 AArch64MachineFunctionInfo *FuncInfo
3223 = MF.getInfo<AArch64MachineFunctionInfo>();
3226 SDValue Chain = Op.getOperand(0);
3227 SDValue VAList = Op.getOperand(1);
3228 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3229 SmallVector<SDValue, 4> MemOps;
3231 // void *__stack at offset 0
3232 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
3233 getPointerTy());
3234 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
3235 MachinePointerInfo(SV), false, false, 0));
3237 // void *__gr_top at offset 8
3238 int GPRSize = FuncInfo->getVariadicGPRSize();
3240 SDValue GRTop, GRTopAddr;
3242 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
3243 DAG.getConstant(8, getPointerTy()));
3245 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
3246 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
3247 DAG.getConstant(GPRSize, getPointerTy()));
3249 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
3250 MachinePointerInfo(SV, 8),
3251 false, false, 0));
3254 // void *__vr_top at offset 16
3255 int FPRSize = FuncInfo->getVariadicFPRSize();
3257 SDValue VRTop, VRTopAddr;
3258 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
3259 DAG.getConstant(16, getPointerTy()));
3261 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
3262 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
3263 DAG.getConstant(FPRSize, getPointerTy()));
3265 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
3266 MachinePointerInfo(SV, 16),
3267 false, false, 0));
3270 // int __gr_offs at offset 24
3271 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
3272 DAG.getConstant(24, getPointerTy()));
3273 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
3274 GROffsAddr, MachinePointerInfo(SV, 24),
3275 false, false, 0));
3277 // int __vr_offs at offset 28
3278 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
3279 DAG.getConstant(28, getPointerTy()));
3280 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
3281 VROffsAddr, MachinePointerInfo(SV, 28),
3282 false, false, 0));
3284 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
3285 MemOps.size());
3289 AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3290 switch (Op.getOpcode()) {
3291 default: llvm_unreachable("Don't know how to custom lower this!");
3292 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
3293 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
3294 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
3295 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
3296 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
3297 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
3298 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
3299 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
3300 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
3301 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
3302 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3303 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
3305 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
3306 case ISD::SRL_PARTS:
3307 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
3309 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3310 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3311 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
3312 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
3313 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3314 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3315 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3316 case ISD::SELECT: return LowerSELECT(Op, DAG);
3317 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
3318 case ISD::SETCC: return LowerSETCC(Op, DAG);
3319 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
3320 case ISD::VASTART: return LowerVASTART(Op, DAG);
3321 case ISD::BUILD_VECTOR:
3322 return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
3323 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3324 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3330 /// Check if the specified splat value corresponds to a valid vector constant
3331 /// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If
3332 /// so, return the encoded 8-bit immediate and the OpCmode instruction fields
3333 /// for this instruction.
3334 static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
3335 unsigned SplatBitSize, SelectionDAG &DAG,
3336 bool is128Bits, NeonModImmType type, EVT &VT,
3337 unsigned &Imm, unsigned &OpCmode) {
3338 switch (SplatBitSize) {
3340 llvm_unreachable("unexpected size for isNeonModifiedImm");
3342 if (type != Neon_Mov_Imm)
3344 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
3345 // Neon movi per byte: Op=0, Cmode=1110.
3348 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
3352 // Neon move inst per halfword
3353 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
3354 if ((SplatBits & ~0xff) == 0) {
3355 // Value = 0x00nn is 0x00nn LSL 0
3356 // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
3357 // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001
3363 if ((SplatBits & ~0xff00) == 0) {
3364 // Value = 0xnn00 is 0x00nn LSL 8
3365 // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
3366 // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011
3368 Imm = SplatBits >> 8;
3372 // can't handle any other
3377 // First the LSL variants (some of the instructions of interest cannot use MSL).
3379 // Neon move instr per word, shift zeros
3380 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
3381 if ((SplatBits & ~0xff) == 0) {
3382 // Value = 0x000000nn is 0x000000nn LSL 0
3383 // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
3384 // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001
3390 if ((SplatBits & ~0xff00) == 0) {
3391 // Value = 0x0000nn00 is 0x000000nn LSL 8
3392 // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
3393 // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011
3395 Imm = SplatBits >> 8;
3399 if ((SplatBits & ~0xff0000) == 0) {
3400 // Value = 0x00nn0000 is 0x000000nn LSL 16
3401 // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
3402 // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101
3404 Imm = SplatBits >> 16;
3408 if ((SplatBits & ~0xff000000) == 0) {
3409 // Value = 0xnn000000 is 0x000000nn LSL 24
3410 // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
3411 // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111
3413 Imm = SplatBits >> 24;
3418 // Now the MSL immediates.
3420 // Neon move instr per word, shift ones
3421 if ((SplatBits & ~0xffff) == 0 &&
3422 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
3423 // Value = 0x0000nnff is 0x000000nn MSL 8
3424 // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
3426 Imm = SplatBits >> 8;
3430 if ((SplatBits & ~0xffffff) == 0 &&
3431 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
3432 // Value = 0x00nnffff is 0x000000nn MSL 16
3433 // movi: Op=0, Cmode= 1101; mvni: Op=1, Cmode= 1101
3435 Imm = SplatBits >> 16;
3439 // can't handle any other
3444 if (type != Neon_Mov_Imm)
3446 // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
3447 // movi Op=1, Cmode=1110.
3449 uint64_t BitMask = 0xff;
3451 unsigned ImmMask = 1;
3453 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
3454 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
3457 } else if ((SplatBits & BitMask) != 0) {
3464 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
3472 static SDValue PerformANDCombine(SDNode *N,
3473 TargetLowering::DAGCombinerInfo &DCI) {
3475 SelectionDAG &DAG = DCI.DAG;
3477 EVT VT = N->getValueType(0);
3479 // We're looking for an AND/SRL pair which together form a UBFX.
3481 if (VT != MVT::i32 && VT != MVT::i64)
3484 if (!isa<ConstantSDNode>(N->getOperand(1)))
3487 uint64_t TruncMask = N->getConstantOperandVal(1);
3488 if (!isMask_64(TruncMask))
3491 uint64_t Width = CountPopulation_64(TruncMask);
3492 SDValue Shift = N->getOperand(0);
3494 if (Shift.getOpcode() != ISD::SRL)
3497 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
3499 uint64_t LSB = Shift->getConstantOperandVal(1);
3501 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
3504 return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
3505 DAG.getConstant(LSB, MVT::i64),
3506 DAG.getConstant(LSB + Width - 1, MVT::i64));
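// Illustrative i32 case: ((x >> 3) & 0x1f) matches with LSB == 3 and
// Width == 5, so the UBFX node carries #3 and #7 (LSB and LSB + Width - 1),
// which selects to "ubfx w0, wX, #3, #5".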
3509 /// For a true bitfield insert, the bits getting into that contiguous mask
3510 /// should come from the low part of an existing value: they must be formed from
3511 /// a compatible SHL operation (unless they're already low). This function
3512 /// checks that condition and returns the least-significant bit that's
3513 /// intended. If the operation is not a field preparation, -1 is returned.
3514 static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
3515 SDValue &MaskedVal, uint64_t Mask) {
3516 if (!isShiftedMask_64(Mask))
3519 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
3520 // instruction. BFI will do a left-shift by LSB before applying the mask we've
3521 // spotted, so in general we should pre-emptively "undo" that by making sure
3522 // the incoming bits have had a right-shift applied to them.
3524 // This right shift, however, will combine with existing left/right shifts. In
3525 // the simplest case of a completely straight bitfield operation, it will be
3526 // expected to completely cancel out with an existing SHL. More complicated
3527 // cases (e.g. bitfield to bitfield copy) may still need a real shift before
3528 // the BFI.
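// Worked example (illustrative): with Mask == 0xff00, LSB is 8; if MaskedVal
// is (X shl 8), the pre-emptive right-shift of 8 cancels the SHL and X feeds
// the BFI directly, leaving ShiftRightRequired at 0 below.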
3530 uint64_t LSB = countTrailingZeros(Mask);
3531 int64_t ShiftRightRequired = LSB;
3532 if (MaskedVal.getOpcode() == ISD::SHL &&
3533 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
3534 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
3535 MaskedVal = MaskedVal.getOperand(0);
3536 } else if (MaskedVal.getOpcode() == ISD::SRL &&
3537 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
3538 ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
3539 MaskedVal = MaskedVal.getOperand(0);
3542 if (ShiftRightRequired > 0)
3543 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
3544 DAG.getConstant(ShiftRightRequired, MVT::i64));
3545 else if (ShiftRightRequired < 0) {
3546 // We could actually end up with a residual left shift, for example with
3547 // "struc.bitfield = val << 1".
3548 MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
3549 DAG.getConstant(-ShiftRightRequired, MVT::i64));
3555 /// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
3556 /// a mask and an extension. Returns true if a BFI was found and provides
3557 /// information on its surroundings.
3558 static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
3559 bool &Extended) {
3561 if (N.getOpcode() == ISD::ZERO_EXTEND) {
3562 Extended = true;
3563 N = N.getOperand(0);
3564 }
3566 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
3567 Mask = N->getConstantOperandVal(1);
3568 N = N.getOperand(0);
3569 } else {
3570 // Mask is the whole width.
3571 Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
3572 }
3574 if (N.getOpcode() == AArch64ISD::BFI) {
3582 /// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
3583 /// is roughly equivalent to (and (BFI ...), mask). This form is used because it
3584 /// can often be further combined with a larger mask. Ultimately, we want mask
3585 /// to be 2^32-1 or 2^64-1 so the AND can be skipped.
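/// An illustration (values invented): for i32,
///   (or (and X, 0xFFF0FFFF), (and (shl Y, 16), 0x000F0000))
/// can become (and (BFI X, Y, 16, 4), 0xFFFFFFFF), and because the combined
/// mask covers every bit, the AND is simply dropped.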
3586 static SDValue tryCombineToBFI(SDNode *N,
3587 TargetLowering::DAGCombinerInfo &DCI,
3588 const AArch64Subtarget *Subtarget) {
3589 SelectionDAG &DAG = DCI.DAG;
3591 EVT VT = N->getValueType(0);
3593 assert(N->getOpcode() == ISD::OR && "Unexpected root");
3595 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
3596 // abandon the effort.
3597 SDValue LHS = N->getOperand(0);
3598 if (LHS.getOpcode() != ISD::AND)
3602 if (isa<ConstantSDNode>(LHS.getOperand(1)))
3603 LHSMask = LHS->getConstantOperandVal(1);
3607 // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
3608 // is or abandon the effort.
3609 SDValue RHS = N->getOperand(1);
3610 if (RHS.getOpcode() != ISD::AND)
3614 if (isa<ConstantSDNode>(RHS.getOperand(1)))
3615 RHSMask = RHS->getConstantOperandVal(1);
3619 // Can't do anything if the masks are incompatible.
3620 if (LHSMask & RHSMask)
3623 // Now we need one of the masks to be a contiguous field. Without loss of
3624 // generality that should be the RHS one.
3625 SDValue Bitfield = LHS.getOperand(0);
3626 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
3627 // We know that LHS is a candidate new value, and RHS isn't already a better
3628 // one.
3629 std::swap(LHS, RHS);
3630 std::swap(LHSMask, RHSMask);
3633 // We've done our best to put the right operands in the right places, all we
3634 // can do now is check whether a BFI exists.
3635 Bitfield = RHS.getOperand(0);
3636 int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
3640 uint32_t Width = CountPopulation_64(RHSMask);
3641 assert(Width && "Expected non-zero bitfield width");
3643 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3644 LHS.getOperand(0), Bitfield,
3645 DAG.getConstant(LSB, MVT::i64),
3646 DAG.getConstant(Width, MVT::i64));
3649 if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
3650 return BFI;
3652 return DAG.getNode(ISD::AND, DL, VT, BFI,
3653 DAG.getConstant(LHSMask | RHSMask, VT));
3656 /// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
3657 /// original input. This is surprisingly common because SROA splits things up
3658 /// into i8 chunks, so the originally detected MaskedBFI may actually only act
3659 // on the low (say) byte of a word. This is then ORed into the rest of the
3660 /// word afterwards.
3662 /// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
3664 /// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
3665 /// MaskedBFI. We can also deal with a certain amount of extend/truncate being
3666 /// involved.
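/// Sketch of the shape involved (masks invented for illustration):
///   (or (and OLDFIELD, 0xff00), (MaskedBFI 0xff, OLDFIELD, NEWFIELD))
/// folds into a single MaskedBFI with mask 0xffff; once the union of masks
/// covers the whole register, the masking disappears entirely.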
3667 static SDValue tryCombineToLargerBFI(SDNode *N,
3668 TargetLowering::DAGCombinerInfo &DCI,
3669 const AArch64Subtarget *Subtarget) {
3670 SelectionDAG &DAG = DCI.DAG;
3672 EVT VT = N->getValueType(0);
3674 // First job is to hunt for a MaskedBFI on either the left or right. Swap
3675 // operands if it's actually on the right.
3677 SDValue PossExtraMask;
3678 uint64_t ExistingMask = 0;
3679 bool Extended = false;
3680 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
3681 PossExtraMask = N->getOperand(1);
3682 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
3683 PossExtraMask = N->getOperand(0);
3687 // We can only combine a BFI with another compatible mask.
3688 if (PossExtraMask.getOpcode() != ISD::AND ||
3689 !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
3692 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
3694 // Masks must be compatible.
3695 if (ExtraMask & ExistingMask)
3698 SDValue OldBFIVal = BFI.getOperand(0);
3699 SDValue NewBFIVal = BFI.getOperand(1);
3701 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
3702 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
3703 // need to be made compatible.
3704 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
3705 && "Invalid types for BFI");
3706 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
3707 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
3710 // We need the MaskedBFI to be combined with a mask of the *same* value.
3711 if (PossExtraMask.getOperand(0) != OldBFIVal)
3714 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3715 OldBFIVal, NewBFIVal,
3716 BFI.getOperand(2), BFI.getOperand(3));
3718 // If the masking is trivial, we don't need to create it.
3719 if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
3720 return BFI;
3722 return DAG.getNode(ISD::AND, DL, VT, BFI,
3723 DAG.getConstant(ExtraMask | ExistingMask, VT));
3726 /// An EXTR instruction is made up of two shifts, ORed together. This helper
3727 /// searches for and classifies those shifts.
3728 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
3729 bool &FromHi) {
3730 if (N.getOpcode() == ISD::SHL)
3731 FromHi = false;
3732 else if (N.getOpcode() == ISD::SRL)
3733 FromHi = true;
3734 else
3735 return false;
3737 if (!isa<ConstantSDNode>(N.getOperand(1)))
3740 ShiftAmount = N->getConstantOperandVal(1);
3741 Src = N->getOperand(0);
3745 /// EXTR instruction extracts a contiguous chunk of bits from two existing
3746 /// registers viewed as a high/low pair. This function looks for the pattern:
3747 /// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
3748 /// EXTR. Can't quite be done in TableGen because the two immediates aren't
3749 /// independent.
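/// For example (i32, illustrative): (or (shl x, 24), (srl y, 8)) becomes
/// (EXTR x, y, 8), i.e. "extr w0, wX, wY, #8", viewing x:y as a 64-bit pair
/// and extracting 32 bits starting at bit 8.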
3750 static SDValue tryCombineToEXTR(SDNode *N,
3751 TargetLowering::DAGCombinerInfo &DCI) {
3752 SelectionDAG &DAG = DCI.DAG;
3754 EVT VT = N->getValueType(0);
3756 assert(N->getOpcode() == ISD::OR && "Unexpected root");
3758 if (VT != MVT::i32 && VT != MVT::i64)
3762 uint32_t ShiftLHS = 0;
3764 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
3768 uint32_t ShiftRHS = 0;
3770 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
3773 // If they're both trying to come from the high part of the register, they're
3774 // not really an EXTR.
3775 if (LHSFromHi == RHSFromHi)
3778 if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
3779 return SDValue();
3781 if (LHSFromHi) {
3782 std::swap(LHS, RHS);
3783 std::swap(ShiftLHS, ShiftRHS);
3784 }
3786 return DAG.getNode(AArch64ISD::EXTR, DL, VT,
3787 LHS, RHS,
3788 DAG.getConstant(ShiftRHS, MVT::i64));
3791 /// Target-specific dag combine xforms for ISD::OR
3792 static SDValue PerformORCombine(SDNode *N,
3793 TargetLowering::DAGCombinerInfo &DCI,
3794 const AArch64Subtarget *Subtarget) {
3796 SelectionDAG &DAG = DCI.DAG;
3798 EVT VT = N->getValueType(0);
3800 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3801 return SDValue();
3803 // Attempt to recognise bitfield-insert operations.
3804 SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
3805 if (Res.getNode())
3806 return Res;
3808 // Attempt to combine an existing MaskedBFI operation into one with a larger
3809 // mask.
3810 Res = tryCombineToLargerBFI(N, DCI, Subtarget);
3811 if (Res.getNode())
3812 return Res;
3814 Res = tryCombineToEXTR(N, DCI);
3815 if (Res.getNode())
3816 return Res;
3818 if (!Subtarget->hasNEON())
3821 // Attempt to use vector immediate-form BSL
3822 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
3824 SDValue N0 = N->getOperand(0);
3825 if (N0.getOpcode() != ISD::AND)
3828 SDValue N1 = N->getOperand(1);
3829 if (N1.getOpcode() != ISD::AND)
3832 if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
3834 unsigned SplatBitSize;
3836 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
3838 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
3839 HasAnyUndefs) && !HasAnyUndefs) {
3841 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
3843 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
3844 HasAnyUndefs) && !HasAnyUndefs &&
3845 SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
3846 SplatBits0 == ~SplatBits1) {
3848 return DAG.getNode(ISD::VSELECT, DL, VT, N0->getOperand(1),
3849 N0->getOperand(0), N1->getOperand(0));
3857 /// Target-specific dag combine xforms for ISD::SRA
3858 static SDValue PerformSRACombine(SDNode *N,
3859 TargetLowering::DAGCombinerInfo &DCI) {
3861 SelectionDAG &DAG = DCI.DAG;
3863 EVT VT = N->getValueType(0);
3865 // We're looking for an SRA/SHL pair which form an SBFX.
3867 if (VT != MVT::i32 && VT != MVT::i64)
3870 if (!isa<ConstantSDNode>(N->getOperand(1)))
3873 uint64_t ExtraSignBits = N->getConstantOperandVal(1);
3874 SDValue Shift = N->getOperand(0);
3876 if (Shift.getOpcode() != ISD::SHL)
3879 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
3882 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
3883 uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
3884 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;
3886 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
3889 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
3890 DAG.getConstant(LSB, MVT::i64),
3891 DAG.getConstant(LSB + Width - 1, MVT::i64));
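// Illustrative i32 case: (sra (shl x, 24), 28) sign-extends bits [7:4] of x,
// so Width == 4 and LSB == 4 here, giving SBFX immediates #4 and #7
// (LSB and LSB + Width - 1).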
3894 /// Check if this is a valid build_vector for the immediate operand of
3895 /// a vector shift operation, where all the elements of the build_vector
3896 /// must have the same constant integer value.
3897 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
3898 // Ignore bit_converts.
3899 while (Op.getOpcode() == ISD::BITCAST)
3900 Op = Op.getOperand(0);
3901 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3902 APInt SplatBits, SplatUndef;
3903 unsigned SplatBitSize;
3905 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
3906 HasAnyUndefs, ElementBits) ||
3907 SplatBitSize > ElementBits)
3909 Cnt = SplatBits.getSExtValue();
3913 /// Check if this is a valid build_vector for the immediate operand of
3914 /// a vector shift left operation. That value must be in the range:
3915 /// 0 <= Value < ElementBits
3916 static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
3917 assert(VT.isVector() && "vector shift count is not a vector type");
3918 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3919 if (!getVShiftImm(Op, ElementBits, Cnt))
3920 return false;
3921 return (Cnt >= 0 && Cnt < ElementBits);
3924 /// Check if this is a valid build_vector for the immediate operand of a
3925 /// vector shift right operation. The value must be in the range:
3926 /// 1 <= Value <= ElementBits
3927 static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) {
3928 assert(VT.isVector() && "vector shift count is not a vector type");
3929 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3930 if (!getVShiftImm(Op, ElementBits, Cnt))
3931 return false;
3932 return (Cnt >= 1 && Cnt <= ElementBits);
3935 static SDValue GenForSextInreg(SDNode *N,
3936 TargetLowering::DAGCombinerInfo &DCI,
3937 EVT SrcVT, EVT DestVT, EVT SubRegVT,
3938 const int *Mask, SDValue Src) {
3939 SelectionDAG &DAG = DCI.DAG;
3940 SDValue Bitcast
3941 = DAG.getNode(ISD::BITCAST, SDLoc(N), SrcVT, Src);
3942 SDValue Sext
3943 = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), DestVT, Bitcast);
3944 SDValue ShuffleVec
3945 = DAG.getVectorShuffle(DestVT, SDLoc(N), Sext, DAG.getUNDEF(DestVT), Mask);
3946 SDValue ExtractSubreg
3947 = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N),
3948 SubRegVT, ShuffleVec,
3949 DAG.getTargetConstant(AArch64::sub_64, MVT::i32)), 0);
3950 return ExtractSubreg;
3953 /// Checks for vector shifts and lowers them.
3954 static SDValue PerformShiftCombine(SDNode *N,
3955 TargetLowering::DAGCombinerInfo &DCI,
3956 const AArch64Subtarget *ST) {
3957 SelectionDAG &DAG = DCI.DAG;
3958 EVT VT = N->getValueType(0);
3959 if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
3960 return PerformSRACombine(N, DCI);
3962 // We're looking for an SRA/SHL pair to help generating instruction
3963 // sshll v0.8h, v0.8b, #0
3964 // The instruction SXTL is also an alias of this instruction.
3966 // For example, for DAG like below,
3967 // v2i32 = sra (v2i32 (shl v2i32, 16)), 16
3968 // we can transform it into
3969 // v2i32 = EXTRACT_SUBREG
3970 // (v4i32 (shuffle_vector
3971 // (v4i32 (sext (v4i16 (bitcast v2i32))),
3972 // undef, (0, 2, u, u)),
3973 // sub_64)
3975 // With this transformation we expect to generate "SSHLL + UZIP1".
3976 // Sometimes the UZIP1 can then be optimized away by combining with surrounding nodes.
3977 int64_t ShrCnt, ShlCnt;
3978 if (N->getOpcode() == ISD::SRA
3979 && (VT == MVT::v2i32 || VT == MVT::v4i16)
3980 && isVShiftRImm(N->getOperand(1), VT, ShrCnt)
3981 && N->getOperand(0).getOpcode() == ISD::SHL
3982 && isVShiftRImm(N->getOperand(0).getOperand(1), VT, ShlCnt)) {
3983 SDValue Src = N->getOperand(0).getOperand(0);
3984 if (VT == MVT::v2i32 && ShrCnt == 16 && ShlCnt == 16) {
3985 // sext_inreg(v2i32, v2i16)
3986 // We essentially only care about the mask {0, 2, u, u}.
3987 int Mask[4] = {0, 2, 4, 6};
3988 return GenForSextInreg(N, DCI, MVT::v4i16, MVT::v4i32, MVT::v2i32,
3989 Mask, Src);
3991 else if (VT == MVT::v2i32 && ShrCnt == 24 && ShlCnt == 24) {
3992 // sext_inreg(v2i32, v2i8)
3993 // We essentially only care about the mask {0, u, 4, u, u, u, u, u}.
3994 int Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14};
3995 return GenForSextInreg(N, DCI, MVT::v8i8, MVT::v8i16, MVT::v2i32,
3996 Mask, Src);
3998 else if (VT == MVT::v4i16 && ShrCnt == 8 && ShlCnt == 8) {
3999 // sext_inreg(v4i16, v4i8)
4000 // We essentially only care about the mask {0, 2, 4, 6, u, u, u, u}.
4001 int Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14};
4002 return GenForSextInreg(N, DCI, MVT::v8i8, MVT::v8i16, MVT::v4i16,
4003 Mask, Src);
4007 // Nothing to be done for scalar shifts.
4008 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4009 if (!VT.isVector() || !TLI.isTypeLegal(VT))
4012 assert(ST->hasNEON() && "unexpected vector shift");
4015 switch (N->getOpcode()) {
4017 llvm_unreachable("unexpected shift opcode");
4020 if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
4022 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
4023 DAG.getConstant(Cnt, MVT::i32));
4024 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
4030 if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
4032 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
4033 DAG.getConstant(Cnt, MVT::i32));
4034 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
4042 /// AArch64-specific DAG combining for intrinsics.
4043 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
4044 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4048 // Don't do anything for most intrinsics.
4051 case Intrinsic::arm_neon_vqshifts:
4052 case Intrinsic::arm_neon_vqshiftu:
4053 EVT VT = N->getOperand(1).getValueType();
4055 if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
4057 unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
4058 ? AArch64ISD::NEON_QSHLs
4059 : AArch64ISD::NEON_QSHLu;
4060 return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
4061 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
4067 /// Target-specific DAG combine function for NEON load/store intrinsics
4068 /// to merge base address updates.
4069 static SDValue CombineBaseUpdate(SDNode *N,
4070 TargetLowering::DAGCombinerInfo &DCI) {
4071 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
4074 SelectionDAG &DAG = DCI.DAG;
4075 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
4076 N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
4077 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
4078 SDValue Addr = N->getOperand(AddrOpIdx);
4080 // Search for a use of the address operand that is an increment.
4081 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
4082 UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
4083 SDNode *User = *UI;
4084 if (User->getOpcode() != ISD::ADD ||
4085 UI.getUse().getResNo() != Addr.getResNo())
4086 continue;
4088 // Check that the add is independent of the load/store. Otherwise, folding
4089 // it would create a cycle.
4090 if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
4091 continue;
4093 // Find the new opcode for the updating load/store.
4094 bool isLoad = true;
4095 bool isLaneOp = false;
4096 unsigned NewOpc = 0;
4097 unsigned NumVecs = 0;
4099 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
4100 switch (IntNo) {
4101 default: llvm_unreachable("unexpected intrinsic for Neon base update");
4102 case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD;
4103 NumVecs = 1; break;
4104 case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD;
4105 NumVecs = 2; break;
4106 case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD;
4107 NumVecs = 3; break;
4108 case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD;
4109 NumVecs = 4; break;
4110 case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD;
4111 NumVecs = 1; isLoad = false; break;
4112 case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD;
4113 NumVecs = 2; isLoad = false; break;
4114 case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD;
4115 NumVecs = 3; isLoad = false; break;
4116 case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD;
4117 NumVecs = 4; isLoad = false; break;
4118 case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
4119 NumVecs = 2; break;
4120 case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
4121 NumVecs = 3; break;
4122 case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
4123 NumVecs = 4; break;
4124 case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
4125 NumVecs = 2; isLoad = false; break;
4126 case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
4127 NumVecs = 3; isLoad = false; break;
4128 case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
4129 NumVecs = 4; isLoad = false; break;
4130 case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD;
4131 NumVecs = 2; isLaneOp = true; break;
4132 case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD;
4133 NumVecs = 3; isLaneOp = true; break;
4134 case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD;
4135 NumVecs = 4; isLaneOp = true; break;
4136 case Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD;
4137 NumVecs = 2; isLoad = false; isLaneOp = true; break;
4138 case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD;
4139 NumVecs = 3; isLoad = false; isLaneOp = true; break;
4140 case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD;
4141 NumVecs = 4; isLoad = false; isLaneOp = true; break;
4142 }
4143 } else {
4145 switch (N->getOpcode()) {
4146 default: llvm_unreachable("unexpected opcode for Neon base update");
4147 case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD;
4148 NumVecs = 2; break;
4149 case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD;
4150 NumVecs = 3; break;
4151 case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD;
4152 NumVecs = 4; break;
4153 }
4154 }
4156 // Find the size of memory referenced by the load/store.
4157 EVT VecTy;
4158 if (isLoad)
4159 VecTy = N->getValueType(0);
4160 else
4161 VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
4162 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
4163 if (isLaneOp)
4164 NumBytes /= VecTy.getVectorNumElements();
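// E.g. (illustrative): a vld2lane of two v4i16 registers covers
// 2 * 64 / 8 = 16 bytes, reduced to 16 / 4 = 4 bytes for a single lane,
// so only a post-increment of exactly #4 can be folded below.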
4166 // If the increment is a constant, it must match the memory ref size.
4167 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
4168 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
4169 uint32_t IncVal = CInc->getZExtValue();
4170 if (IncVal != NumBytes)
4171 continue;
4172 Inc = DAG.getTargetConstant(IncVal, MVT::i32);
4175 // Create the new updating load/store node.
4176 EVT Tys[6];
4177 unsigned NumResultVecs = (isLoad ? NumVecs : 0);
4178 unsigned n;
4179 for (n = 0; n < NumResultVecs; ++n)
4180 Tys[n] = VecTy;
4181 Tys[n++] = MVT::i64;
4182 Tys[n] = MVT::Other;
4183 SDVTList SDTys = DAG.getVTList(ArrayRef<EVT>(Tys, NumResultVecs + 2));
4184 SmallVector<SDValue, 8> Ops;
4185 Ops.push_back(N->getOperand(0)); // incoming chain
4186 Ops.push_back(N->getOperand(AddrOpIdx));
4187 Ops.push_back(Inc);
4188 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
4189 Ops.push_back(N->getOperand(i));
4191 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
4192 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
4193 Ops.data(), Ops.size(),
4194 MemInt->getMemoryVT(),
4195 MemInt->getMemOperand());
4198 std::vector<SDValue> NewResults;
4199 for (unsigned i = 0; i < NumResultVecs; ++i) {
4200 NewResults.push_back(SDValue(UpdN.getNode(), i));
4202 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
4203 DCI.CombineTo(N, NewResults);
4204 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
4205 break;
4206 }
4211 /// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1)
4212 /// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs.
4213 /// If so, combine them to a vldN-dup operation and return the combined node.
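/// Illustrative shape: if every result of (vld2lane ..., lane 1) feeds only
/// NEON_VDUPLANE nodes for lane 1, the lane load plus per-register
/// duplicates can be replaced by a single NEON_LD2DUP, roughly
/// "ld2r {v0.4h, v1.4h}, [x0]".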
4214 static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
4215 SelectionDAG &DAG = DCI.DAG;
4216 EVT VT = N->getValueType(0);
4218 // Check if the VDUPLANE operand is a vldN-lane intrinsic.
4219 SDNode *VLD = N->getOperand(0).getNode();
4220 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
4222 unsigned NumVecs = 0;
4223 unsigned NewOpc = 0;
4224 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
4225 if (IntNo == Intrinsic::arm_neon_vld2lane) {
4226 NumVecs = 2;
4227 NewOpc = AArch64ISD::NEON_LD2DUP;
4228 } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
4229 NumVecs = 3;
4230 NewOpc = AArch64ISD::NEON_LD3DUP;
4231 } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
4232 NumVecs = 4;
4233 NewOpc = AArch64ISD::NEON_LD4DUP;
4238 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
4239 // numbers match the load.
4240 unsigned VLDLaneNo =
4241 cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue();
4242 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
4243 UI != UE; ++UI) {
4244 // Ignore uses of the chain result.
4245 if (UI.getUse().getResNo() == NumVecs)
4246 continue;
4247 SDNode *User = *UI;
4248 if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE ||
4249 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
4250 return SDValue();
4251 }
4253 // Create the vldN-dup node.
4254 EVT Tys[5];
4255 unsigned n;
4256 for (n = 0; n < NumVecs; ++n)
4257 Tys[n] = VT;
4258 Tys[n] = MVT::Other;
4259 SDVTList SDTys = DAG.getVTList(ArrayRef<EVT>(Tys, NumVecs + 1));
4260 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
4261 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
4262 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2,
4263 VLDMemInt->getMemoryVT(),
4264 VLDMemInt->getMemOperand());
4267 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
4268 UI != UE; ++UI) {
4269 unsigned ResNo = UI.getUse().getResNo();
4270 // Ignore uses of the chain result.
4271 if (ResNo == NumVecs)
4272 continue;
4273 SDNode *User = *UI;
4274 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
4277 // Now the vldN-lane intrinsic is dead except for its chain result.
4278 // Update uses of the chain.
4279 std::vector<SDValue> VLDDupResults;
4280 for (unsigned n = 0; n < NumVecs; ++n)
4281 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
4282 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
4283 DCI.CombineTo(VLD, VLDDupResults);
4285 return SDValue(N, 0);
4288 // vselect (v1i1 setcc) ->
4289 // vselect (v1iXX setcc) (XX is the size of the compared operand type)
4290 // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as
4291 // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine
4292 // this node anymore.
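// E.g. (vselect (v1i1 (setcc v1f64 ...)), v1f64 A, v1f64 B) is rewritten as
// (vselect (v1i64 (setcc ...)), A, B), whose condition type now matches the
// width of the compared operands.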
4293 static SDValue PerformVSelectCombine(SDNode *N, SelectionDAG &DAG) {
4294 SDValue N0 = N->getOperand(0);
4295 EVT CCVT = N0.getValueType();
4297 if (N0.getOpcode() != ISD::SETCC || CCVT.getVectorNumElements() != 1 ||
4298 CCVT.getVectorElementType() != MVT::i1)
4301 EVT ResVT = N->getValueType(0);
4302 EVT CmpVT = N0.getOperand(0).getValueType();
4303 // Only combine when the result type is of the same size as the compared
4304 // operands.
4305 if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
4308 SDValue IfTrue = N->getOperand(1);
4309 SDValue IfFalse = N->getOperand(2);
4310 SDValue SetCC =
4311 DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
4312 N0.getOperand(0), N0.getOperand(1),
4313 cast<CondCodeSDNode>(N0.getOperand(2))->get());
4314 return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
4315 IfTrue, IfFalse);
4318 // sign_extend (extract_vector_elt (v1i1 setcc)) ->
4319 // extract_vector_elt (v1iXX setcc)
4320 // (XX is the size of the compared operand type)
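// E.g. (sign_extend i64 (extract_vector_elt (v1i1 (setcc v1f64 ...)), 0))
// becomes (extract_vector_elt (v1i64 (setcc ...)), 0): the NEON compare
// already yields an all-ones/all-zeros 64-bit lane, so no extend is needed.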
4321 static SDValue PerformSignExtendCombine(SDNode *N, SelectionDAG &DAG) {
4322 SDValue N0 = N->getOperand(0);
4323 if (N0.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
4324 return SDValue();
4325 SDValue Vec = N0.getOperand(0);
4326 if (Vec.getOpcode() != ISD::SETCC)
4327 return SDValue();
4329 EVT ResVT = N->getValueType(0);
4330 EVT CmpVT = Vec.getOperand(0).getValueType();
4331 // Only optimize when the result type is of the same size as the element
4332 // type of the compared operand.
4333 if (ResVT.getSizeInBits() != CmpVT.getVectorElementType().getSizeInBits())
4336 SDValue Lane = N0.getOperand(1);
4337 SDValue SetCC =
4338 DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
4339 Vec.getOperand(0), Vec.getOperand(1),
4340 cast<CondCodeSDNode>(Vec.getOperand(2))->get());
4341 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ResVT,
4342 SetCC, Lane);
4346 AArch64TargetLowering::PerformDAGCombine(SDNode *N,
4347 DAGCombinerInfo &DCI) const {
4348 switch (N->getOpcode()) {
4350 case ISD::AND: return PerformANDCombine(N, DCI);
4351 case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
4352 case ISD::SHL:
4353 case ISD::SRA:
4354 case ISD::SRL:
4355 return PerformShiftCombine(N, DCI, getSubtarget());
4356 case ISD::VSELECT: return PerformVSelectCombine(N, DCI.DAG);
4357 case ISD::SIGN_EXTEND: return PerformSignExtendCombine(N, DCI.DAG);
4358 case ISD::INTRINSIC_WO_CHAIN:
4359 return PerformIntrinsicCombine(N, DCI.DAG);
4360 case AArch64ISD::NEON_VDUPLANE:
4361 return CombineVLDDUP(N, DCI);
4362 case AArch64ISD::NEON_LD2DUP:
4363 case AArch64ISD::NEON_LD3DUP:
4364 case AArch64ISD::NEON_LD4DUP:
4365 return CombineBaseUpdate(N, DCI);
4366 case ISD::INTRINSIC_VOID:
4367 case ISD::INTRINSIC_W_CHAIN:
4368 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
4369 case Intrinsic::arm_neon_vld1:
4370 case Intrinsic::arm_neon_vld2:
4371 case Intrinsic::arm_neon_vld3:
4372 case Intrinsic::arm_neon_vld4:
4373 case Intrinsic::arm_neon_vst1:
4374 case Intrinsic::arm_neon_vst2:
4375 case Intrinsic::arm_neon_vst3:
4376 case Intrinsic::arm_neon_vst4:
4377 case Intrinsic::arm_neon_vld2lane:
4378 case Intrinsic::arm_neon_vld3lane:
4379 case Intrinsic::arm_neon_vld4lane:
4380 case Intrinsic::aarch64_neon_vld1x2:
4381 case Intrinsic::aarch64_neon_vld1x3:
4382 case Intrinsic::aarch64_neon_vld1x4:
4383 case Intrinsic::aarch64_neon_vst1x2:
4384 case Intrinsic::aarch64_neon_vst1x3:
4385 case Intrinsic::aarch64_neon_vst1x4:
4386 case Intrinsic::arm_neon_vst2lane:
4387 case Intrinsic::arm_neon_vst3lane:
4388 case Intrinsic::arm_neon_vst4lane:
4389 return CombineBaseUpdate(N, DCI);
4398 AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
4399 VT = VT.getScalarType();
4404 switch (VT.getSimpleVT().SimpleTy) {
4418 bool AArch64TargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
4419 unsigned AddrSpace,
4420 bool *Fast) const {
4421 const AArch64Subtarget *Subtarget = getSubtarget();
4422 // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus
4423 bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
4425 switch (VT.getSimpleVT().SimpleTy) {
4429 case MVT::i8: case MVT::i16:
4430 case MVT::i32: case MVT::i64:
4431 case MVT::f32: case MVT::f64: {
4432 // Unaligned access can use (for example) LDRB, LDRH, LDRW
4433 if (AllowsUnaligned) {
4440 // 64-bit vector types
4441 case MVT::v8i8: case MVT::v4i16:
4442 case MVT::v2i32: case MVT::v1i64:
4443 case MVT::v2f32: case MVT::v1f64:
4444 // 128-bit vector types
4445 case MVT::v16i8: case MVT::v8i16:
4446 case MVT::v4i32: case MVT::v2i64:
4447 case MVT::v4f32: case MVT::v2f64: {
4448 // For any little-endian targets with neon, we can support unaligned
4449 // load/store of V registers using ld1/st1.
4450 // A big-endian target may also explicitly support unaligned accesses
4451 if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) {
4461 // Check whether a shuffle_vector can be represented as a concat_vector.
4462 bool AArch64TargetLowering::isConcatVector(SDValue Op, SelectionDAG &DAG,
4463 SDValue V0, SDValue V1,
4464 const int *Mask,
4465 SDValue &Res) const {
4467 EVT VT = Op.getValueType();
4468 if (VT.getSizeInBits() != 128)
4470 if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() ||
4471 VT.getVectorElementType() != V1.getValueType().getVectorElementType())
4474 unsigned NumElts = VT.getVectorNumElements();
4475 bool isConcat = true;
4476 bool splitV0 = false;
4477 if (V0.getValueType().getSizeInBits() == 128)
4478 splitV0 = true;
4480 for (int I = 0, E = NumElts / 2; I != E; I++) {
4481 if (Mask[I] != I) {
4482 isConcat = false;
4483 break;
4484 }
4485 }
4487 if (isConcat) {
4488 int offset = NumElts / 2;
4489 for (int I = NumElts / 2, E = NumElts; I != E; I++) {
4490 if (Mask[I] != I + splitV0 * offset) {
4491 isConcat = false;
4492 break;
4493 }
4494 }
4497 if (isConcat) {
4498 EVT CastVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
4499 NumElts / 2);
4500 if (splitV0) {
4501 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
4502 DAG.getConstant(0, MVT::i64));
4503 }
4504 if (V1.getValueType().getSizeInBits() == 128) {
4505 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
4506 DAG.getConstant(0, MVT::i64));
4507 }
4508 Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
4509 return true;
4514 // Check whether a Build Vector could be presented as Shuffle Vector.
4515 // This shuffle vector may not be legalized, so the length of its operands
4516 // and the length of the result may differ.
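// For example (hypothetical operands): a v4i16 build_vector taking lanes
// 0 and 2 of V0 (8 x i16) and lanes 1 and 3 of V1 yields the mask
// <0, 2, 9, 11>, since V1 lanes are offset by V0NumElts.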
4517 bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG,
4518 SDValue &V0, SDValue &V1,
4521 EVT VT = Op.getValueType();
4522 unsigned NumElts = VT.getVectorNumElements();
4523 unsigned V0NumElts = 0;
4525 // Check if all elements are extracted from at most two vectors.
4526 for (unsigned i = 0; i < NumElts; ++i) {
4527 SDValue Elt = Op.getOperand(i);
4528 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4529 Elt.getOperand(0).getValueType().getVectorElementType() !=
4530 VT.getVectorElementType())
4533 if (V0.getNode() == 0) {
4534 V0 = Elt.getOperand(0);
4535 V0NumElts = V0.getValueType().getVectorNumElements();
4537 if (Elt.getOperand(0) == V0) {
4538 Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue());
4540 } else if (V1.getNode() == 0) {
4541 V1 = Elt.getOperand(0);
4543 if (Elt.getOperand(0) == V1) {
4544 unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue();
4545 Mask[i] = (Lane + V0NumElts);
4554 /// LowerShiftRightParts - Lower SRL_PARTS and SRA_PARTS, which return two
4555 /// i64 values and take a 2 x i64 value to shift plus a shift amount.
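/// Sketch of what is selected below (SRL_PARTS, 0 < Amt < 64):
///   Lo = (ShOpLo >> Amt) | (ShOpHi << (64 - Amt));
///   Hi = ShOpHi >> Amt;
/// for Amt >= 64 the ExtraShAmt (Amt - 64) path supplies Lo instead.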
4556 SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op,
4557 SelectionDAG &DAG) const {
4558 assert(Op.getNumOperands() == 3 && "Not a quad-shift!");
4559 EVT VT = Op.getValueType();
4560 unsigned VTBits = VT.getSizeInBits();
4562 SDValue ShOpLo = Op.getOperand(0);
4563 SDValue ShOpHi = Op.getOperand(1);
4564 SDValue ShAmt = Op.getOperand(2);
4565 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
4567 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
4568 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
4569 DAG.getConstant(VTBits, MVT::i64), ShAmt);
4570 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
4571 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
4572 DAG.getConstant(VTBits, MVT::i64));
4573 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
4574 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4575 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4576 SDValue Tmp3 = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
4578 SDValue A64cc;
4579 SDValue CmpOp = getSelectableIntSetCC(ExtraShAmt,
4580 DAG.getConstant(0, MVT::i64),
4581 ISD::SETGE, A64cc,
4582 DAG, dl);
4584 SDValue Hi = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
4585 DAG.getConstant(0, Tmp3.getValueType()), Tmp3,
4586 A64cc);
4587 SDValue Lo = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
4588 TrueVal, FalseVal, A64cc);
4590 SDValue Ops[2] = { Lo, Hi };
4591 return DAG.getMergeValues(Ops, 2, dl);
4594 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
4595 /// i64 values and take a 2 x i64 value to shift plus a shift amount.
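/// Sketch of what is selected below (SHL_PARTS, 0 < Amt < 64):
///   Lo = ShOpLo << Amt;
///   Hi = (ShOpHi << Amt) | (ShOpLo >> (64 - Amt));
/// for Amt >= 64, Lo is 0 and Hi becomes ShOpLo << (Amt - 64).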
4596 SDValue AArch64TargetLowering::LowerShiftLeftParts(SDValue Op,
4597 SelectionDAG &DAG) const {
4598 assert(Op.getNumOperands() == 3 && "Not a quad-shift!");
4599 EVT VT = Op.getValueType();
4600 unsigned VTBits = VT.getSizeInBits();
4602 SDValue ShOpLo = Op.getOperand(0);
4603 SDValue ShOpHi = Op.getOperand(1);
4604 SDValue ShAmt = Op.getOperand(2);
4606 assert(Op.getOpcode() == ISD::SHL_PARTS);
4607 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
4608 DAG.getConstant(VTBits, MVT::i64), ShAmt);
4609 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
4610 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
4611 DAG.getConstant(VTBits, MVT::i64));
4612 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
4613 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
4614 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4615 SDValue Tmp4 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
4617 SDValue A64cc;
4618 SDValue CmpOp = getSelectableIntSetCC(ExtraShAmt,
4619 DAG.getConstant(0, MVT::i64),
4620 ISD::SETGE, A64cc,
4621 DAG, dl);
4623 SDValue Lo = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
4624 DAG.getConstant(0, Tmp4.getValueType()), Tmp4,
4625 A64cc);
4626 SDValue Hi = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
4627 Tmp3, FalseVal, A64cc);
4629 SDValue Ops[2] = { Lo, Hi };
4630 return DAG.getMergeValues(Ops, 2, dl);
4633 // If this is a case we can't handle, return null and let the default
4634 // expansion code take care of it.
4636 AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
4637 const AArch64Subtarget *ST) const {
4639 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
4641 EVT VT = Op.getValueType();
4643 APInt SplatBits, SplatUndef;
4644 unsigned SplatBitSize;
4647 bool UseNeonMov = VT.getSizeInBits() >= 64;
4649 // Note we favor lowering MOVI over MVNI.
4650 // This has implications on the definition of patterns in TableGen to select
4651 // BIC immediate instructions but not ORR immediate instructions.
4652 // If this lowering order is changed, TableGen patterns for BIC immediate and
4653 // ORR immediate instructions have to be updated.
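// E.g. a v4i32 splat of 0xffffff00 has no MOVI encoding, but its complement
// 0x000000ff does, so the MVNI path below would emit "mvni v0.4s, #0xff".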
4654 if (UseNeonMov &&
4655 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
4656 if (SplatBitSize <= 64) {
4657 // First attempt to use vector immediate-form MOVI
4660 unsigned OpCmode = 0;
4662 if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
4663 SplatBitSize, DAG, VT.is128BitVector(),
4664 Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
4665 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
4666 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
4668 if (ImmVal.getNode() && OpCmodeVal.getNode()) {
4669 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
4670 ImmVal, OpCmodeVal);
4671 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
4675 // Then attempt to use vector immediate-form MVNI
4676 uint64_t NegatedImm = (~SplatBits).getZExtValue();
4677 if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
4678 DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
4679 Imm, OpCmode)) {
4680 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
4681 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
4682 if (ImmVal.getNode() && OpCmodeVal.getNode()) {
4683 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
4684 ImmVal, OpCmodeVal);
4685 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
4689 // Attempt to use vector immediate-form FMOV
4690 if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
4691 (VT == MVT::v2f64 && SplatBitSize == 64)) {
4692 APFloat RealVal(
4693 SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
4694 SplatBits);
4695 uint32_t ImmVal;
4696 if (A64Imms::isFPImm(RealVal, ImmVal)) {
4697 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
4698 return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
4704 unsigned NumElts = VT.getVectorNumElements();
4705 bool isOnlyLowElement = true;
4706 bool usesOnlyOneValue = true;
4707 bool hasDominantValue = false;
4708 bool isConstant = true;
4710 // Map of the number of times a particular SDValue appears in the
4711 // BUILD_VECTOR.
4712 DenseMap<SDValue, unsigned> ValueCounts;
4714 for (unsigned i = 0; i < NumElts; ++i) {
4715 SDValue V = Op.getOperand(i);
4716 if (V.getOpcode() == ISD::UNDEF)
4719 isOnlyLowElement = false;
4720 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
4723 ValueCounts.insert(std::make_pair(V, 0));
4724 unsigned &Count = ValueCounts[V];
4726 // Is this value dominant? (takes up more than half of the lanes)
4727 if (++Count > (NumElts / 2)) {
4728 hasDominantValue = true;
4732 if (ValueCounts.size() != 1)
4733 usesOnlyOneValue = false;
4734 if (!Value.getNode() && ValueCounts.size() > 0)
4735 Value = ValueCounts.begin()->first;
4737 if (ValueCounts.size() == 0)
4738 return DAG.getUNDEF(VT);
4740 if (isOnlyLowElement)
4741 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
4743 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4744 if (hasDominantValue && EltSize <= 64) {
4745 // Use VDUP for non-constant splats.
4749 // If we are DUPing a value that comes directly from a vector, we could
4750 // just use DUPLANE. We can only do this if the lane being extracted
4751 // is at a constant index, as the DUP from lane instructions only have
4752 // constant-index forms.
4754 // If there is a TRUNCATE between EXTRACT_VECTOR_ELT and DUP, we can
4755 // remove the TRUNCATE for DUPLANE by updating the source vector to an
4756 // appropriate vector type and lane index.
4758 // FIXME: v1i8, v1i16 and v1i32 are currently legal vector types. If they
4759 // stop being legal, there will be no need to check that the source vector
4760 // type is at least 64 bits wide.
4761 SDValue V = Value;
4762 if (Value->getOpcode() == ISD::TRUNCATE)
4763 V = Value->getOperand(0);
4764 if (V->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4765 isa<ConstantSDNode>(V->getOperand(1)) &&
4766 V->getOperand(0).getValueType().getSizeInBits() >= 64) {
4768 // If the element size of the source vector is larger than the DUPLANE
4769 // element size, we can do the transformation by:
4770 // 1) bitcasting the source register to a vector with smaller elements;
4771 // 2) multiplying the lane index by SrcEltSize/ResEltSize.
4772 // For example, we can lower
4773 // "v8i16 vdup_lane(v4i32, 1)"
4774 // to
4775 // "v8i16 vdup_lane(v8i16 bitcast(v4i32), 2)".
4776 SDValue SrcVec = V->getOperand(0);
4777 unsigned SrcEltSize =
4778 SrcVec.getValueType().getVectorElementType().getSizeInBits();
4779 unsigned ResEltSize = VT.getVectorElementType().getSizeInBits();
4780 if (SrcEltSize > ResEltSize) {
4781 assert((SrcEltSize % ResEltSize == 0) && "Invalid element size");
4783 unsigned SrcSize = SrcVec.getValueType().getSizeInBits();
4784 unsigned ResSize = VT.getSizeInBits();
4786 if (SrcSize > ResSize) {
4787 assert((SrcSize % ResSize == 0) && "Invalid vector size");
4788 EVT CastVT =
4789 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
4790 SrcSize / ResEltSize);
4791 BitCast = DAG.getNode(ISD::BITCAST, DL, CastVT, SrcVec);
4792 } else {
4793 assert((SrcSize == ResSize) && "Invalid vector size of source vec");
4794 BitCast = DAG.getNode(ISD::BITCAST, DL, VT, SrcVec);
4795 }
4797 unsigned LaneIdx = V->getConstantOperandVal(1);
4798 SDValue Lane =
4799 DAG.getConstant((SrcEltSize / ResEltSize) * LaneIdx, MVT::i64);
4800 N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, BitCast, Lane);
4801 } else {
4802 assert((SrcEltSize == ResEltSize) &&
4803 "Invalid element size of source vec");
4804 N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, V->getOperand(0),
4805 V->getOperand(1));
4806 }
4807 } else
4808 N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
4810 if (!usesOnlyOneValue) {
4811 // The dominant value was splatted as 'N', but we now have to insert
4812 // all differing elements.
4813 for (unsigned I = 0; I < NumElts; ++I) {
4814 if (Op.getOperand(I) == Value)
4815 continue;
4816 SmallVector<SDValue, 3> Ops;
4817 Ops.push_back(N);
4818 Ops.push_back(Op.getOperand(I));
4819 Ops.push_back(DAG.getConstant(I, MVT::i64));
4820 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3);
4825 if (usesOnlyOneValue && isConstant) {
4826 return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
4829 // If all elements are constants and the case above didn't get hit, fall back
4830 // to the default expansion, which will generate a load from the constant
4831 // pool.
4835 // Try to lower this as a shuffle and reuse the VECTOR_SHUFFLE lowering.
4838 if (isKnownShuffleVector(Op, DAG, V0, V1, Mask)) {
4839 unsigned V0NumElts = V0.getValueType().getVectorNumElements();
4840 if (!V1.getNode() && V0NumElts == NumElts * 2) {
4841 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
4842 DAG.getConstant(NumElts, MVT::i64));
4843 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
4844 DAG.getConstant(0, MVT::i64));
4845 V0NumElts = V0.getValueType().getVectorNumElements();
4848 if (V1.getNode() && NumElts == V0NumElts &&
4849 V0NumElts == V1.getValueType().getVectorNumElements()) {
4850 SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask);
4851 if (Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE)
4852 return Shuffle;
4853 else
4854 return LowerVECTOR_SHUFFLE(Shuffle, DAG);
4857 if (isConcatVector(Op, DAG, V0, V1, Mask, Res))
4858 return Res;
4862 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
4863 // know the default expansion would otherwise fall back on something even
4864 // worse. For a vector with one or two non-undef values, that's
4865 // scalar_to_vector for the elements followed by a shuffle (provided the
4866 // shuffle is valid for the target) and materialization element by element
4867 // on the stack followed by a load for everything else.
4868 if (!isConstant && !usesOnlyOneValue) {
4869 SDValue Vec = DAG.getUNDEF(VT);
4870 for (unsigned i = 0 ; i < NumElts; ++i) {
4871 SDValue V = Op.getOperand(i);
4872 if (V.getOpcode() == ISD::UNDEF)
4873 continue;
4874 SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
4875 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx);
4882 /// isREVMask - Check if a vector shuffle corresponds to a REV
4883 /// instruction with the specified blocksize. (The order of the elements
4884 /// within each block of the vector is reversed.)
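/// For example, REV32 on v8i8 corresponds to the mask <3,2,1,0,7,6,5,4>:
/// each 32-bit block stays in place while its bytes are reversed.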
4885 static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
4886 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
4887 "Only possible block sizes for REV are: 16, 32, 64");
4889 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4893 unsigned NumElts = VT.getVectorNumElements();
4894 unsigned BlockElts = M[0] + 1;
4895 // If the first shuffle index is UNDEF, be optimistic.
4896 if (M[0] < 0)
4897 BlockElts = BlockSize / EltSz;
4899 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
4902 for (unsigned i = 0; i < NumElts; ++i) {
4903 if (M[i] < 0)
4904 continue; // ignore UNDEF indices
4905 if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
4912 // isPermuteMask - Check whether the vector shuffle matches the UZP, ZIP or
4913 // TRN instructions, and return the matching NEON opcode (0 if none matches).
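// For v4i16 the masks checked below are, for illustration:
//   UZP1 <0,2,4,6>  UZP2 <1,3,5,7>  ZIP1 <0,4,1,5>
//   ZIP2 <2,6,3,7>  TRN1 <0,4,2,6>  TRN2 <1,5,3,7>
// (with isV2undef, indices >= NumElts wrap back into V1).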
4914 static unsigned isPermuteMask(ArrayRef<int> M, EVT VT, bool isV2undef) {
4915 unsigned NumElts = VT.getVectorNumElements();
4919 bool ismatch = true;
4922 for (unsigned i = 0; i < NumElts; ++i) {
4923 unsigned answer = i * 2;
4924 if (isV2undef && answer >= NumElts)
4925 answer -= NumElts;
4926 if (M[i] != -1 && (unsigned)M[i] != answer) {
4927 ismatch = false;
4928 break;
4929 }
4930 }
4931 if (ismatch)
4932 return AArch64ISD::NEON_UZP1;
4935 ismatch = true;
4936 for (unsigned i = 0; i < NumElts; ++i) {
4937 unsigned answer = i * 2 + 1;
4938 if (isV2undef && answer >= NumElts)
4939 answer -= NumElts;
4940 if (M[i] != -1 && (unsigned)M[i] != answer) {
4941 ismatch = false;
4942 break;
4943 }
4944 }
4945 if (ismatch)
4946 return AArch64ISD::NEON_UZP2;
4949 ismatch = true;
4950 for (unsigned i = 0; i < NumElts; ++i) {
4951 unsigned answer = i / 2 + NumElts * (i % 2);
4952 if (isV2undef && answer >= NumElts)
4953 answer -= NumElts;
4954 if (M[i] != -1 && (unsigned)M[i] != answer) {
4955 ismatch = false;
4956 break;
4957 }
4958 }
4959 if (ismatch)
4960 return AArch64ISD::NEON_ZIP1;
4963 ismatch = true;
4964 for (unsigned i = 0; i < NumElts; ++i) {
4965 unsigned answer = (NumElts + i) / 2 + NumElts * (i % 2);
4966 if (isV2undef && answer >= NumElts)
4967 answer -= NumElts;
4968 if (M[i] != -1 && (unsigned)M[i] != answer) {
4969 ismatch = false;
4970 break;
4971 }
4972 }
4973 if (ismatch)
4974 return AArch64ISD::NEON_ZIP2;
4977 ismatch = true;
4978 for (unsigned i = 0; i < NumElts; ++i) {
4979 unsigned answer = i + (NumElts - 1) * (i % 2);
4980 if (isV2undef && answer >= NumElts)
4981 answer -= NumElts;
4982 if (M[i] != -1 && (unsigned)M[i] != answer) {
4983 ismatch = false;
4984 break;
4985 }
4986 }
4987 if (ismatch)
4988 return AArch64ISD::NEON_TRN1;
4991 ismatch = true;
4992 for (unsigned i = 0; i < NumElts; ++i) {
4993 unsigned answer = 1 + i + (NumElts - 1) * (i % 2);
4994 if (isV2undef && answer >= NumElts)
4995 answer -= NumElts;
4996 if (M[i] != -1 && (unsigned)M[i] != answer) {
4997 ismatch = false;
4998 break;
4999 }
5000 }
5001 if (ismatch)
5002 return AArch64ISD::NEON_TRN2;
5004 return 0;

SDValue
AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection. This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  ArrayRef<int> ShuffleMask = SVN->getMask();

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  if (EltSize > 64)
    return SDValue();

  if (isREVMask(ShuffleMask, VT, 64))
    return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1);
  if (isREVMask(ShuffleMask, VT, 32))
    return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1);
  if (isREVMask(ShuffleMask, VT, 16))
    return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1);

  unsigned ISDNo;
  if (V2.getOpcode() == ISD::UNDEF)
    ISDNo = isPermuteMask(ShuffleMask, VT, true);
  else
    ISDNo = isPermuteMask(ShuffleMask, VT, false);

  if (ISDNo) {
    if (V2.getOpcode() == ISD::UNDEF)
      return DAG.getNode(ISDNo, dl, VT, V1, V1);
    else
      return DAG.getNode(ISDNo, dl, VT, V1, V2);
  }

  SDValue Res;
  if (isConcatVector(Op, DAG, V1, V2, &ShuffleMask[0], Res))
    return Res;

  // If the elements of the shuffle mask are all the same constant, we can
  // transform the shuffle into either NEON_VDUP or NEON_VDUPLANE.
  if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
    int Lane = SVN->getSplatIndex();
    // If this is an undef splat, generate it via "just" vdup, if possible.
    if (Lane == -1) Lane = 0;

    // Test if V1 is a SCALAR_TO_VECTOR.
    if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
    }
    // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
    if (V1.getOpcode() == ISD::BUILD_VECTOR) {
      bool IsScalarToVector = true;
      for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
        if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
            i != (unsigned)Lane) {
          IsScalarToVector = false;
          break;
        }
      if (IsScalarToVector)
        return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
                           V1.getOperand(Lane));
    }

    // Test if V1 is an EXTRACT_SUBVECTOR.
    if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
      int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
      return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0),
                         DAG.getConstant(Lane + ExtLane, MVT::i64));
    }
    // Test if V1 is a CONCAT_VECTORS.
    if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
        V1.getOperand(1).getOpcode() == ISD::UNDEF) {
      SDValue Op0 = V1.getOperand(0);
      assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() &&
             "Invalid vector lane access");
      return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0,
                         DAG.getConstant(Lane, MVT::i64));
    }

    return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
                       DAG.getConstant(Lane, MVT::i64));
  }

  int Length = ShuffleMask.size();
  int V1EltNum = V1.getValueType().getVectorNumElements();

  // If the number of V1 elements is the same as the number of shuffle mask
  // elements and the shuffle mask values are sequential, we can transform
  // the shuffle into NEON_VEXTRACT.
  if (V1EltNum == Length) {
    // Check if the shuffle mask is sequential.
    int SkipUndef = 0;
    while (ShuffleMask[SkipUndef] == -1) {
      SkipUndef++;
    }
    int CurMask = ShuffleMask[SkipUndef];
    if (CurMask >= SkipUndef) {
      bool IsSequential = true;
      for (int I = SkipUndef; I < Length; ++I) {
        if (ShuffleMask[I] != -1 && ShuffleMask[I] != CurMask) {
          IsSequential = false;
          break;
        }
        CurMask++;
      }
      if (IsSequential) {
        assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
        unsigned VecSize = EltSize * V1EltNum;
        unsigned Index = (EltSize / 8) * (ShuffleMask[SkipUndef] - SkipUndef);
        if (VecSize == 64 || VecSize == 128)
          return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
                             DAG.getConstant(Index, MVT::i64));
      }
    }
  }
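
  // For example, the v8i8 mask <1, 2, 3, 4, 5, 6, 7, 8> is sequential and
  // selects 8 consecutive bytes of the concatenation V1:V2 starting at byte
  // 1, so it becomes a NEON_VEXTRACT (EXT) with Index == 1.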

  // For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
  // insert by element from V2 into V1.
  // If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be
  // a better choice to insert into than V1 since fewer inserts are needed,
  // so we count the elements to be inserted for both V1 and V2 and select
  // the one needing fewer inserts as the insert target.

  // Collect the elements that need to be inserted and their indices.
  SmallVector<int, 8> NV1Elt;
  SmallVector<int, 8> N1Index;
  SmallVector<int, 8> NV2Elt;
  SmallVector<int, 8> N2Index;
  for (int I = 0; I != Length; ++I) {
    if (ShuffleMask[I] != I) {
      NV1Elt.push_back(ShuffleMask[I]);
      N1Index.push_back(I);
    }
  }
  for (int I = 0; I != Length; ++I) {
    if (ShuffleMask[I] != (I + V1EltNum)) {
      NV2Elt.push_back(ShuffleMask[I]);
      N2Index.push_back(I);
    }
  }

  // Decide which vector to insert into. If all lanes mismatch, neither V1
  // nor V2 is a useful base, so start from UNDEF instead.
  SDValue InsV = V1;
  SmallVector<int, 8> InsMasks = NV1Elt;
  SmallVector<int, 8> InsIndex = N1Index;
  if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
    if (NV1Elt.size() > NV2Elt.size()) {
      InsV = V2;
      InsMasks = NV2Elt;
      InsIndex = N2Index;
    }
  } else {
    InsV = DAG.getNode(ISD::UNDEF, dl, VT);
  }

  for (int I = 0, E = InsMasks.size(); I != E; ++I) {
    SDValue ExtV = V1;
    int Mask = InsMasks[I];
    if (Mask >= V1EltNum) {
      ExtV = V2;
      Mask -= V1EltNum;
    }
    // Any value type smaller than i32 is illegal in AArch64, and this
    // lowering function is called after the legalize pass, so we need to
    // legalize the extracted value here.
    EVT EltVT;
    if (VT.getVectorElementType().isFloatingPoint())
      EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
    else
      EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32;

    if (Mask >= 0) {
      ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
                         DAG.getConstant(Mask, MVT::i64));
      InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV,
                         DAG.getConstant(InsIndex[I], MVT::i64));
    }
  }
  return InsV;
}
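
// Worked example: for the v8i16 mask <0, 1, 10, 11, 12, 13, 14, 15>, six
// lanes mismatch the identity on V1 but only lanes 0 and 1 mismatch the
// identity on V2, so V2 is chosen as the base and lanes 0 and 1 of V1 are
// extracted and inserted into it: two extract/insert pairs instead of six.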

AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'w': // An FP/SIMD vector register
      return C_RegisterClass;
    case 'I': // Constant that can be used with an ADD instruction
    case 'J': // Constant that can be used with a SUB instruction
    case 'K': // Constant that can be used with a 32-bit logical instruction
    case 'L': // Constant that can be used with a 64-bit logical instruction
    case 'M': // Constant that can be used as a 32-bit MOV immediate
    case 'N': // Constant that can be used as a 64-bit MOV immediate
    case 'Y': // Floating point constant zero
    case 'Z': // Integer constant zero
      return C_Other;
    case 'Q': // A memory reference with base register and no offset
      return C_Memory;
    case 'S': // A symbolic address
      return C_Other;
    }
  }

  // FIXME: Ump, Utf, Usa, Ush
  // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
  //      whatever they may be
  // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
  // Usa: An absolute symbolic address
  // Ush: The high part (bits 32:12) of a pc-relative symbolic address
  assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
         && Constraint != "Ush" && "Unimplemented constraints");

  return TargetLowering::getConstraintType(Constraint);
}
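
// Hypothetical user code, for illustration only: in
//   asm("add %0, %1, %2" : "=r"(res) : "r"(a), "I"(4095));
// the 'I' operand is classified as C_Other here and must be a constant that
// an ADD instruction accepts, while 'r' operands are C_RegisterClass.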

TargetLowering::ConstraintWeight
AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
                                                  const char *Constraint) const {

  llvm_unreachable("Constraint weight unimplemented");
}

void
AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                    std::string &Constraint,
                                                    std::vector<SDValue> &Ops,
                                                    SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only length-1 constraints are C_Other.
  if (Constraint.size() != 1) return;

  // Only C_Other constraints get lowered like this. That means constants for
  // us, so return early if there's no hope the constraint can be lowered.

  switch(Constraint[0]) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'Z': {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    uint64_t CVal = C->getZExtValue();
    uint32_t Bits;

    switch (Constraint[0]) {
    default:
      // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J'
      // is a peculiarly useless SUB constraint.
      llvm_unreachable("Unimplemented C_Other constraint");
    case 'I':
      if (CVal <= 0xfff)
        break;
      return;
    case 'K':
      if (A64Imms::isLogicalImm(32, CVal, Bits))
        break;
      return;
    case 'L':
      if (A64Imms::isLogicalImm(64, CVal, Bits))
        break;
      return;
    case 'Z':
      if (CVal == 0)
        break;
      return;
    }

    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }
  case 'S': {
    // An absolute symbolic address or label reference.
    if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                          GA->getValueType(0));
    } else if (const BlockAddressSDNode *BA
                 = dyn_cast<BlockAddressSDNode>(Op)) {
      Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                         BA->getValueType(0));
    } else if (const ExternalSymbolSDNode *ES
                 = dyn_cast<ExternalSymbolSDNode>(Op)) {
      Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                           ES->getValueType(0));
    } else
      return;
    break;
  }
  case 'Y':
    if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFP->isExactlyValue(0.0)) {
        Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
        break;
      }
    }
    return;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // It's an unknown constraint for us. Let generic code have a go.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
AArch64TargetLowering::getRegForInlineAsmConstraint(
                                                  const std::string &Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT.getSizeInBits() <= 32)
        return std::make_pair(0U, &AArch64::GPR32RegClass);
      else if (VT == MVT::i64)
        return std::make_pair(0U, &AArch64::GPR64RegClass);
      break;
    case 'w':
      if (VT == MVT::f16)
        return std::make_pair(0U, &AArch64::FPR16RegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &AArch64::FPR32RegClass);
      else if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, &AArch64::FPR64RegClass);
      else if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, &AArch64::FPR128RegClass);
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

/// Represent NEON load and store intrinsics as MemIntrinsicNodes.
/// The associated MachineMemOperands record the alignment specified
/// in the intrinsic calls.
bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::aarch64_neon_vld1x2:
  case Intrinsic::aarch64_neon_vld1x3:
  case Intrinsic::aarch64_neon_vld1x4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::aarch64_neon_vst1x2:
  case Intrinsic::aarch64_neon_vst1x3:
  case Intrinsic::aarch64_neon_vst1x4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
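
// For example, a vld2 of two v4i16 values loads 16 bytes in total, so memVT
// is conservatively recorded as v2i64 (16 / 8 == 2 i64 elements) regardless
// of the element type actually loaded.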

// Truncations from 64-bit GPR to 32-bit GPR are free.
bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return true;
}

bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return true;
}
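
// For example, truncating i64 to i32 just reads the W sub-register of the
// X register, so isTruncateFree(i64, i32) is true, while the widening
// direction (i32 to i64) is not a truncation and returns false.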

// All 32-bit GPR operations implicitly zero the high-half of the
// corresponding 64-bit GPR.
bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  if (NumBits1 == 32 && NumBits2 == 64)
    return true;
  return false;
}

bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  if (NumBits1 == 32 && NumBits2 == 64)
    return true;
  return false;
}

bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  // 8-, 16-, and 32-bit integer loads all implicitly zero-extend.
  return (VT1.isSimple() && VT1.isInteger() && VT2.isSimple() &&
          VT2.isInteger() && VT1.getSizeInBits() <= 32);
}
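
// For example, (zext (load i16)) to i64 costs nothing extra: the LDRH that
// materializes the i16 already writes zeros into the rest of the register.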

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool AArch64TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // AArch64 has five basic addressing modes:
  //  reg
  //  reg + 9-bit signed offset
  //  reg + SIZE_IN_BYTES * 12-bit unsigned offset
  //  reg + reg
  //  reg + SIZE_IN_BYTES * reg

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // No reg+reg+imm addressing.
  if (AM.HasBaseReg && AM.BaseOffs && AM.Scale)
    return false;

  // Check reg + imm case:
  // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12
  uint64_t NumBytes = 0;
  if (Ty->isSized()) {
    uint64_t NumBits = getDataLayout()->getTypeSizeInBits(Ty);
    NumBytes = NumBits / 8;
    if (!isPowerOf2_64(NumBits))
      NumBytes = 0;
  }

  if (!AM.Scale) {
    int64_t Offset = AM.BaseOffs;

    // 9-bit signed offset
    if (Offset >= -(1LL << 9) && Offset <= (1LL << 9) - 1)
      return true;

    // 12-bit unsigned offset
    unsigned shift = Log2_64(NumBytes);
    if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
        // Must be a multiple of NumBytes (NumBytes is a power of 2)
        (Offset >> shift) << shift == Offset)
      return true;
    return false;
  }

  if (!AM.Scale || AM.Scale == 1 ||
      (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes))
    return true;

  return false;
}
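
// For example, for an i64 access (NumBytes == 8) a base-plus-offset mode
// with Offset == 32760 (8 * 4095) is accepted by the scaled 12-bit check,
// while Offset == 4, although not 8-byte aligned, still fits the 9-bit
// signed-offset form and is also legal.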

int AArch64TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                                Type *Ty) const {
  // Scaling factors are not free at all.
  // Operands                     | Rt Latency
  // -------------------------------------------
  // Rt, [Xn, Xm]                 | 4
  // -------------------------------------------
  // Rt, [Xn, Xm, lsl #imm]       | Rn: 4 Rm: 5
  // Rt, [Xn, Wm, <extend> #imm]  |
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1 if
    // it is not equal to 0 or 1.
    return AM.Scale != 0 && AM.Scale != 1;
  return -1;
}
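
// For example, [Xn, Xm, lsl #3] for an i64 access (Scale == 8 == NumBytes)
// is legal but reports a scaling cost of 1, whereas the unscaled [Xn, Xm]
// form (Scale == 1) costs 0.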