//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"

using namespace llvm;

static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  if (Subtarget->isTargetLinux())
    return new AArch64LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  llvm_unreachable("unknown subtarget type");
}

AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {

  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  // SIMD compares set the entire lane's bits to 1
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
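  // (i.e. a "true" lane in a v4i32 mask is 0xFFFFFFFF, which is what the NEON
  // compare instructions themselves produce.)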

  // Scalar register <-> type mapping
  addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
  }

  computeRegisterProperties();

  // We combine OR nodes for bitfield and NEON BSL operations.
  setTargetDAGCombine(ISD::OR);

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SHL);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  // AArch64 does not have i1 loads, or much of anything for i1 really.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
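  // ("Promote" here means an extending i1 load is widened to the next legal
  // integer load, with the extension performed on that wider result instead.)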

  setStackPointerRegisterToSaveRestore(AArch64::XSP);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);

  // We'll lower globals to wrappers for selection.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);

  // A64 instructions have the comparison predicate attached to the user of the
  // result, but having a separate comparison is valuable for matching.
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);

  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);

  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
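  // (There is no scalar popcount instruction, so CTPOP falls back to LLVM's
  // generic shift-and-mask bit-counting expansion.)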

  // Legal floating-point operations.
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f64, Legal);

  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FCEIL, MVT::f64, Legal);

  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

  setOperationAction(ISD::FNEG, MVT::f32, Legal);
  setOperationAction(ISD::FNEG, MVT::f64, Legal);

  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f64, Legal);

  setOperationAction(ISD::FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::FSQRT, MVT::f64, Legal);

  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f128, Legal);

  // Illegal floating-point operations.
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);

  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);

  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f64, Expand);

  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG, MVT::f64, Expand);

  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);

  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);

  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
  setOperationAction(ISD::FPOWI, MVT::f64, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);

  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
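  // (The "Custom" f128 arithmetic above doesn't stay in registers: it is
  // lowered to calls into the soft-float runtime, e.g. __addtf3 for FADD.)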

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);

  // This prevents LLVM trying to compress double constants into a floating
  // constant-pool entry and trying to load from there. It's of doubtful benefit
  // for A64: we'd need LDR followed by FCVT, I believe.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);

  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
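  // (An "Expand" truncating FP store becomes an FP_ROUND plus a plain store,
  // and an extending FP load becomes a plain load plus FP_EXTEND.)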

  setExceptionPointerRegister(AArch64::X0);
  setExceptionSelectorRegister(AArch64::X1);

  if (Subtarget->hasNEON()) {
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);

    setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
  }
}

EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  // It's reasonably important that this value matches the "natural" legal
  // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
  // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
  if (!VT.isVector()) return MVT::i32;
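  // For vectors the mask must match the operands' element width, e.g. a setcc
  // on v4i32 yields a v4i32 of all-ones/all-zeros lanes.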
  return VT.changeVectorElementTypeToInteger();
}

static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc,
                                  unsigned &StrOpc) {
  static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
                                       AArch64::LDXR_word, AArch64::LDXR_dword};
  static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
                                      AArch64::LDAXR_word, AArch64::LDAXR_dword};
  static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
                                        AArch64::STXR_word, AArch64::STXR_dword};
  static const unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
                                       AArch64::STLXR_word, AArch64::STLXR_dword};

  const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");
  LdrOpc = LoadOps[Log2_32(Size)];
  StrOpc = StoreOps[Log2_32(Size)];
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *TRC
    = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
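  // For ATOMIC_SWAP there is no arithmetic to perform: the value written back
  // is simply "incr", so it can stand in for the scratch register directly.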

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   ldxr dest, ptr
  //   <binop> scratch, dest, incr
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // All arithmetic operations we'll be creating are designed to take an extra
  // shift or extend operand, which we can conveniently set to zero.

  // Operand order needs to go the other way for NAND.
  if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
    BuildMI(BB, dl, TII->get(BinOpcode), scratch)
      .addReg(incr).addReg(dest).addImm(0);
  else if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), scratch)
      .addReg(dest).addReg(incr).addImm(0);

  // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              unsigned CmpOp,
                                              A64CC::CondCodes Cond) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());

  unsigned oldval = dest;
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  unsigned scratch = MRI.createVirtualRegister(TRC);
  MRI.constrainRegClass(scratch, TRCsp);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   ldxr dest, ptr
  //   cmp incr, dest (, sign extend if necessary)
  //   csel scratch, dest, incr, cond
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // Build compare and cmov instructions.
  MRI.constrainRegClass(incr, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(incr).addReg(oldval).addImm(0);

  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
          scratch)
    .addReg(oldval).addReg(incr).addImm(Cond);
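
  // E.g. for ATOMIC_LOAD_MIN, Cond is GT: when incr > dest the old value is
  // already the minimum and is kept, otherwise incr is written back.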

  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
    .addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const {
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned oldval = MI->getOperand(2).getReg();
  unsigned newval = MI->getOperand(3).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRCsp;
  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  //  thisMBB:
  //   ...
  //   fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  //  loop1MBB:
  //   ldxr dest, [ptr]
  //   cmp dest, oldval
  //   b.ne exitMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
  MRI.constrainRegClass(dest, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(dest).addReg(oldval).addImm(0);
  BuildMI(BB, dl, TII->get(AArch64::Bcc))
    .addImm(A64CC::NE).addMBB(exitMBB);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  //  loop2MBB:
  //   strex stxr_status, newval, [ptr]
  //   cbnz stxr_status, loop1MBB
  BB = loop2MBB;
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loop1MBB);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
  //     str q0, [sp]
  //     b.ne IfTrue
  //     b Finish
  // IfTrue:
  //     str q1, [sp]
  // Finish:
  //     ldr q0, [sp]
  //
  // Using virtual registers would probably not be beneficial since COPY
  // instructions are expensive for f128 (there's no actual instruction to
  // implement the copy).
  //
  // An alternative would be to do an integer-CSEL on some address. E.g.:
  //     mov x0, sp
  //     add x1, sp, #16
  //     str q0, [x0]
  //     str q1, [x1]
  //     csel x0, x0, x1, ne
  //     ldr q0, [x0]
  //
  // It's unclear which approach is actually optimal.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction::iterator It = MBB;
  ++It;

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned IfTrueReg = MI->getOperand(1).getReg();
  unsigned IfFalseReg = MI->getOperand(2).getReg();
  unsigned CondCode = MI->getOperand(3).getImm();
  bool NZCVKilled = MI->getOperand(4).isKill();

  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer rest of current basic-block to EndBB
  EndBB->splice(EndBB->begin(), MBB,
                llvm::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  // We need somewhere to store the f128 value needed.
  int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);
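  // (16 bytes with 16-byte alignment: exactly one q-register's worth of
  // stack.)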

  // [... start of incoming MBB ...]
  //     str qIFFALSE, [sp]
  //     b.cc IfTrue
  //     b Done
  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfFalseReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);
  BuildMI(MBB, DL, TII->get(AArch64::Bcc))
    .addImm(CondCode)
    .addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::Bimm))
    .addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  if (!NZCVKilled) {
    // NZCV is live-through TrueBB.
    TrueBB->addLiveIn(AArch64::NZCV);
    EndBB->addLiveIn(AArch64::NZCV);
  }

  // IfTrue:
  //     str qIFTRUE, [sp]
  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfTrueReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
  // blocks.
  TrueBB->addSuccessor(EndBB);

  // Done:
  //     ldr qDEST, [sp]
  // [... rest of incoming MBB ...]
  MachineInstr *StartOfEnd = EndBB->begin();
  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  MI->eraseFromParent();
  return EndBB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unhandled instruction with custom inserter");
  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, MBB);
  case AArch64::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);

  case AArch64::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);

  case AArch64::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);

  case AArch64::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);

  case AArch64::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);

  case AArch64::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);

  case AArch64::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);

  case AArch64::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);

  case AArch64::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);

  case AArch64::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);

  case AArch64::ATOMIC_SWAP_I8:
    return emitAtomicBinary(MI, MBB, 1, 0);
  case AArch64::ATOMIC_SWAP_I16:
    return emitAtomicBinary(MI, MBB, 2, 0);
  case AArch64::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, MBB, 4, 0);
  case AArch64::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, MBB, 8, 0);

  case AArch64::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwap(MI, MBB, 1);
  case AArch64::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwap(MI, MBB, 2);
  case AArch64::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, MBB, 4);
  case AArch64::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, MBB, 8);
  }
}

const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC";
  case AArch64ISD::Call: return "AArch64ISD::Call";
  case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV";
  case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad";
  case AArch64ISD::BFI: return "AArch64ISD::BFI";
  case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
  case AArch64ISD::Ret: return "AArch64ISD::Ret";
  case AArch64ISD::SBFX: return "AArch64ISD::SBFX";
  case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC";
  case AArch64ISD::SETCC: return "AArch64ISD::SETCC";
  case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
  case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL";
  case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
  case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";

  case AArch64ISD::NEON_BSL:
    return "AArch64ISD::NEON_BSL";
  case AArch64ISD::NEON_MOVIMM:
    return "AArch64ISD::NEON_MOVIMM";
  case AArch64ISD::NEON_MVNIMM:
    return "AArch64ISD::NEON_MVNIMM";
  case AArch64ISD::NEON_FMOVIMM:
    return "AArch64ISD::NEON_FMOVIMM";
  case AArch64ISD::NEON_CMP:
    return "AArch64ISD::NEON_CMP";
  case AArch64ISD::NEON_CMPZ:
    return "AArch64ISD::NEON_CMPZ";
  case AArch64ISD::NEON_TST:
    return "AArch64ISD::NEON_TST";
  case AArch64ISD::NEON_QSHLs:
    return "AArch64ISD::NEON_QSHLs";
  case AArch64ISD::NEON_QSHLu:
    return "AArch64ISD::NEON_QSHLu";
  case AArch64ISD::NEON_VDUP:
    return "AArch64ISD::NEON_VDUP";
  case AArch64ISD::NEON_VDUPLANE:
    return "AArch64ISD::NEON_VDUPLANE";
  default:
    return NULL;
  }
}

static const uint16_t AArch64FPRArgRegs[] = {
  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
};
static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);

static const uint16_t AArch64ArgRegs[] = {
  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
};
static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);

static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Mark all remaining general purpose registers as allocated. We don't
  // backtrack: if (for example) an i128 gets put on the stack, no subsequent
  // i64 will go in registers (C.11).
  for (unsigned i = 0; i < NumArgRegs; ++i)
    State.AllocateReg(AArch64ArgRegs[i]);

  return false;
}

#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
  switch (CC) {
  default: llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return CC_A64_APCS;
  }
}

void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                           SDLoc DL, SDValue &Chain) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  SmallVector<SDValue, 8> MemOps;

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
                                                         NumArgRegs);
  unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
                                                         NumFPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);

    SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());

    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                   MachinePointerInfo::getStack(i * 8),
                                   false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }
  }

  if (getSubtarget()->hasFPARMv8()) {
    unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
    int FPRIdx = 0;
    // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
    // can omit a register save area if we know we'll never use registers of
    // the appropriate type.
    if (FPRSaveSize != 0) {
      FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);

      SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());

      for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
        unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
                                     &AArch64::FPR128RegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
        SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                     MachinePointerInfo::getStack(i * 16),
                                     false, false, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                          DAG.getConstant(16, getPointerTy()));
      }
    }
    FuncInfo->setVariadicFPRIdx(FPRIdx);
    FuncInfo->setVariadicFPRSize(FPRSaveSize);
  }

  int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);

  FuncInfo->setVariadicStackIdx(StackIdx);
  FuncInfo->setVariadicGPRIdx(GPRIdx);
  FuncInfo->setVariadicGPRSize(GPRSaveSize);

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                        MemOps.size());
  }
}

SDValue
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
                                            CallingConv::ID CallConv, bool isVarArg,
                                            const SmallVectorImpl<ISD::InputArg> &Ins,
                                            SDLoc dl, SelectionDAG &DAG,
                                            SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));

  SmallVector<SDValue, 16> ArgValues;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;

    if (Flags.isByVal()) {
      // Byval is used for small structs and HFAs in the PCS, but the system
      // should work in a non-compliant manner for larger structs.
      EVT PtrTy = getPointerTy();
      int Size = Flags.getByValSize();
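      // Round the byval size up to a whole number of 8-byte, GPR-sized slots.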
      unsigned NumRegs = (Size + 7) / 8;

      unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
                                                 VA.getLocMemOffset(),
                                                 false);
      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
      InVals.push_back(FrameIdxN);

      continue;
    } else if (VA.isRegLoc()) {
      MVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);

      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
    } else { // VA.isMemLoc()
      assert(VA.isMemLoc());

      int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                      VA.getLocMemOffset(), true);

      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned DestSize = VA.getValVT().getSizeInBits();
      unsigned DestSubReg;

      switch (DestSize) {
      case 8: DestSubReg = AArch64::sub_8; break;
      case 16: DestSubReg = AArch64::sub_16; break;
      case 32: DestSubReg = AArch64::sub_32; break;
      case 64: DestSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                            VA.getValVT(), ArgValue,
                                            DAG.getTargetConstant(DestSubReg,
                                                                  MVT::i32)),
                         0);
      break;
    }
    }

    InVals.push_back(ArgValue);
  }

  if (isVarArg)
    SaveVarArgRegisters(CCInfo, DAG, dl, Chain);

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes in
    // any case:
    StackArgSize = RoundUpToAlignment(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
    // a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackArgSize);

    // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
  }

  // Even if we're not expected to free up the space, it's useful to know how
  // much is there while considering tail calls (because we can reuse it).
  FuncInfo->setBytesInStackArgArea(StackArgSize);

  return Chain;
}

SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // PCS: "If the type, T, of the result of a function is such that
    // void func(T arg) would require that arg be passed as a value in a
    // register (or set of registers) according to the rules in 5.4, then the
    // result is returned in the same registers as would be used for such an
    // argument."
    //
    // Otherwise, "the caller shall reserve a block of memory of sufficient
    // size and alignment to hold the result. The address of the memory block
    // shall be passed as an additional argument to the function in x8."
    //
    // This is implemented in two places. The register-return values are dealt
    // with here, more complex returns are passed as an sret parameter, which
    // means we don't have to worry about it during actual return.
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");

    SDValue Arg = OutVals[i];

    // There's no convenient note in the ABI about this as there is for normal
    // arguments, but it says return values are passed in the same registers as
    // an argument would be. I believe that includes the comments about
    // unspecified higher bits, putting the burden of widening on the *caller*
    // for return values.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
      // Floating-point values should only be extended when they're going into
      // memory, which can't happen here so an integer extend is acceptable.
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
  bool IsSibCall = false;

  if (IsTailCall) {
    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                    Outs, OutVals, Ins, DAG);

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));

  // On AArch64 (and all other architectures I'm aware of) the most this has to
  // do is adjust the stack pointer.
  unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
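  // (E.g. 20 bytes of stack-passed arguments reserve a 32-byte, 16-aligned
  // area.)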

  // Since we're not changing the ABI to make this a tail call, the memory
  // operands are already available in the caller's incoming argument space.
  if (IsSibCall)
    NumBytes = 0;

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                                 dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
                                        getPointerTy());

  SmallVector<SDValue, 8> MemOpChains;
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    SDValue Arg = OutVals[i];

    // Callee does the actual widening, so all extensions just use an implicit
    // definition of the rest of the Loc. Aesthetically, this would be nicer as
    // an ANY_EXTEND, but that isn't valid for floating-point types and this
    // alternative works on integer types too.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned SrcSize = VA.getValVT().getSizeInBits();
      unsigned SrcSubReg;

      switch (SrcSize) {
      case 8: SrcSubReg = AArch64::sub_8; break;
      case 16: SrcSubReg = AArch64::sub_16; break;
      case 32: SrcSubReg = AArch64::sub_32; break;
      case 64: SrcSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                       VA.getLocVT(),
                                       DAG.getUNDEF(VA.getLocVT()),
                                       Arg,
                                       DAG.getTargetConstant(SrcSubReg,
                                                             MVT::i32)),
                    0);
      break;
    }
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      // A normal register (sub-) argument. For now we just note it down because
      // we want to copy things into registers as late as possible to avoid
      // register-pressure (and possibly worse).
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc() && "unexpected argument location");

    SDValue DstAddr;
    MachinePointerInfo DstInfo;
    if (IsTailCall) {
      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() * 8 :
                                          VA.getLocVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);

      DstAddr = DAG.getFrameIndex(FI, getPointerTy());
      DstInfo = MachinePointerInfo::getFixedStack(FI);

      // Make sure any stack arguments overlapping with where we're storing are
      // loaded before this eventual operation. Otherwise they'll be clobbered.
      Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
    } else {
      SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());

      DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
      DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
    }

    if (Flags.isByVal()) {
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
      SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
                                  Flags.getByValAlign(),
                                  /*isVolatile = */ false,
                                  /*alwaysInline = */ false,
                                  DstInfo, MachinePointerInfo(0));
      MemOpChains.push_back(Cpy);
    } else {
      // Normal stack argument, put it where it's needed.
      SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
                                   false, false, 0);
      MemOpChains.push_back(Store);
    }
  }

  // The loads and stores generated above shouldn't clash with each other.
  // Combining them with this TokenFactor notes that fact for the rest of the
  // backend.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Most of the rest of the instructions need to be glued together; we don't
  // want assignments to actual registers used by a call to be rearranged by a
  // well-meaning scheduler.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // The linker is responsible for inserting veneers when necessary to put a
  // function call destination in range, so we don't need to bother with a
  // wrapper here.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();
    Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call, however in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when sp is reset they'll be
  // in the correct location.
  if (IsTailCall && !IsSibCall) {
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(0, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  // We produce the following DAG scheme for the actual call instruction:
  //     (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
  //
  // Most arguments aren't going to be used and just keep the values live as
  // far as LLVM is concerned. It's expected to be selected as simply "bl
  // callee" (for a direct, non-tail call).
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall) {
    // Each tail call may have to adjust the stack by a different amount, so
    // this information must travel along with the operation for eventual
    // consumption by emitEpilogue.
    Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
  }

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers. This
  // is used later in codegen to constrain register-allocation.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If we needed glue, put it in as the last argument.
  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall) {
    return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
  }

  Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Now we can reclaim the stack, just as well do it before working out where
  // our return value is.
  if (!IsSibCall) {
    uint64_t CalleePopBytes
      = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;

    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(CalleePopBytes, true),
                               InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  return LowerCallResult(Chain, InFlag, CallConv,
                         IsVarArg, Ins, dl, DAG, InVals);
}

SDValue
AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                       CallingConv::ID CallConv, bool IsVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                       SDLoc dl, SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Return values that are too big to fit into registers should use an sret
    // pointer, so this can be a lot simpler than the main argument code.
    assert(VA.isRegLoc() && "Memory locations not expected for call return");

    SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                                     InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
    case CCValAssign::SExt:
    case CCValAssign::AExt:
      // Floating-point arguments only get extended/truncated if they're going
      // in memory, so using the integer operation is acceptable here.
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
1505 AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1506 CallingConv::ID CalleeCC,
1508 bool IsCalleeStructRet,
1509 bool IsCallerStructRet,
1510 const SmallVectorImpl<ISD::OutputArg> &Outs,
1511 const SmallVectorImpl<SDValue> &OutVals,
1512 const SmallVectorImpl<ISD::InputArg> &Ins,
1513 SelectionDAG& DAG) const {
1515 // For CallingConv::C this function knows whether the ABI needs
1516 // changing. That's not true for other conventions so they will have to opt in
1518 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
1521 const MachineFunction &MF = DAG.getMachineFunction();
1522 const Function *CallerF = MF.getFunction();
1523 CallingConv::ID CallerCC = CallerF->getCallingConv();
1524 bool CCMatch = CallerCC == CalleeCC;
1526 // Byval parameters hand the function a pointer directly into the stack area
1527 // we want to reuse during a tail call. Working around this *is* possible (see
1528 // X86) but less efficient and uglier in LowerCall.
1529 for (Function::const_arg_iterator i = CallerF->arg_begin(),
1530 e = CallerF->arg_end(); i != e; ++i)
1531 if (i->hasByValAttr())
1534 if (getTargetMachine().Options.GuaranteedTailCallOpt) {
1535 if (IsTailCallConvention(CalleeCC) && CCMatch)
1540 // Now we search for cases where we can use a tail call without changing the
1541 // ABI. Sibcall is used in some places (particularly gcc) to refer to this
1544 // I want anyone implementing a new calling convention to think long and hard
1545 // about this assert.
1546 assert((!IsVarArg || CalleeCC == CallingConv::C)
1547 && "Unexpected variadic calling convention");
1549 if (IsVarArg && !Outs.empty()) {
1550 // At least two cases here: if caller is fastcc then we can't have any
1551 // memory arguments (we'd be expected to clean up the stack afterwards). If
1552 // caller is C then we could potentially use its argument area.
1554 // FIXME: for now we take the most conservative of these in both cases:
1555 // disallow all variadic memory operands.
1556 SmallVector<CCValAssign, 16> ArgLocs;
1557 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1558 getTargetMachine(), ArgLocs, *DAG.getContext());
1560 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }
  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }
  // Nothing more to check if the callee is taking no arguments
  if (Outs.empty())
    return true;
1600 SmallVector<CCValAssign, 16> ArgLocs;
1601 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1602 getTargetMachine(), ArgLocs, *DAG.getContext());
1604 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1606 const AArch64MachineFunctionInfo *FuncInfo
1607 = MF.getInfo<AArch64MachineFunctionInfo>();
1609 // If the stack arguments for this call would fit into our own save area then
1610 // the call can be made tail.
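  // For example, a caller that itself received 16 bytes of stack-based
  // arguments can tail-call anything whose outgoing stack arguments need at
  // most those same 16 bytes: that area is dead once our incoming values
  // have been read.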
  return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
}

bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
  return CallCC == CallingConv::Fast && TailCallOpt;
}

bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
  return CallCC == CallingConv::Fast;
}
SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo *MFI,
                                                   int ClobberedFI) const {
1627 SmallVector<SDValue, 8> ArgChains;
1628 int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
1629 int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;
1631 // Include the original chain at the beginning of the list. When this is
1632 // used by target LowerCall hooks, this helps legalize find the
1633 // CALLSEQ_BEGIN node.
1634 ArgChains.push_back(Chain);
  // Add a chain value for each stack-argument load that overlaps the object
  // being clobbered, so the new store is ordered after those loads.
1637 for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1638 UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
1639 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
1640 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
1641 if (FI->getIndex() < 0) {
1642 int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
1643 int64_t InLastByte = InFirstByte;
1644 InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;
1646 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1647 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1648 ArgChains.push_back(SDValue(L, 1));
1651 // Build a tokenfactor for all the chains.
1652 return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
1653 &ArgChains[0], ArgChains.size());
static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETEQ:  return A64CC::EQ;
  case ISD::SETGT:  return A64CC::GT;
  case ISD::SETGE:  return A64CC::GE;
  case ISD::SETLT:  return A64CC::LT;
  case ISD::SETLE:  return A64CC::LE;
  case ISD::SETNE:  return A64CC::NE;
  case ISD::SETUGT: return A64CC::HI;
  case ISD::SETUGE: return A64CC::HS;
  case ISD::SETULT: return A64CC::LO;
  case ISD::SETULE: return A64CC::LS;
  default: llvm_unreachable("Unexpected condition code");
  }
}
1672 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
1673 // icmp is implemented using adds/subs immediate, which take an unsigned
1674 // 12-bit immediate, optionally shifted left by 12 bits.
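  // e.g. 0xfff and 0xfff000 are both encodable, but 0xfff001 is not: it has
  // significant bits in both the low 12 bits and the shifted-by-12 field.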
  // Negative values are symmetric: a SUBS immediate can stand in for an ADDS
  // of the negated value (and vice versa).
  if (Val < 0)
    Val = -Val;

  return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
}
1683 SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
1684 ISD::CondCode CC, SDValue &A64cc,
1685 SelectionDAG &DAG, SDLoc &dl) const {
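  // When the RHS constant doesn't fit the ADDS/SUBS immediate encoding we may
  // still be able to nudge it by one and flip the condition, e.g.
  // (illustrative): (x < 0xfff001) has an unencodable RHS but is equivalent
  // to (x <= 0xfff000), whose RHS fits the shifted 12-bit form.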
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    int64_t C = 0;
    EVT VT = RHSC->getValueType(0);
    bool knownInvalid = false;

    // I'm not convinced the rest of LLVM handles these edge cases properly, but
    // we can at least get it right.
    if (isSignedIntSetCC(CC)) {
      C = RHSC->getSExtValue();
    } else if (RHSC->getZExtValue() > INT64_MAX) {
      // A 64-bit constant not representable by a signed 64-bit integer is far
      // too big to fit into a SUBS immediate anyway.
      knownInvalid = true;
    } else {
      C = RHSC->getZExtValue();
    }

    if (!knownInvalid && !isLegalICmpImmediate(C)) {
      // The constant doesn't fit; try adjusting it by one and flipping the
      // condition to compensate.
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, VT);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, VT);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, VT);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, VT);
        }
        break;
      }
    }
  }
1739 A64CC::CondCodes CondCode = IntCCToA64CC(CC);
1740 A64cc = DAG.getConstant(CondCode, MVT::i32);
1741 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1742 DAG.getCondCode(CC));
1745 static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
1746 A64CC::CondCodes &Alternative) {
  A64CC::CondCodes CondCode = A64CC::Invalid;
  Alternative = A64CC::Invalid;

  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
1753 case ISD::SETOEQ: CondCode = A64CC::EQ; break;
1755 case ISD::SETOGT: CondCode = A64CC::GT; break;
1757 case ISD::SETOGE: CondCode = A64CC::GE; break;
1758 case ISD::SETOLT: CondCode = A64CC::MI; break;
1759 case ISD::SETOLE: CondCode = A64CC::LS; break;
1760 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
1761 case ISD::SETO: CondCode = A64CC::VC; break;
1762 case ISD::SETUO: CondCode = A64CC::VS; break;
1763 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
1764 case ISD::SETUGT: CondCode = A64CC::HI; break;
1765 case ISD::SETUGE: CondCode = A64CC::PL; break;
1767 case ISD::SETULT: CondCode = A64CC::LT; break;
1769 case ISD::SETULE: CondCode = A64CC::LE; break;
  case ISD::SETUNE: CondCode = A64CC::NE; break;
  }

  return CondCode;
}
SDValue
AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1782 switch(getTargetMachine().getCodeModel()) {
1783 case CodeModel::Small:
1784 // The most efficient code is PC-relative anyway for the small memory model,
1785 // so we don't need to worry about relocation model.
1786 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
1787 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1788 AArch64II::MO_NO_FLAG),
1789 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1790 AArch64II::MO_LO12),
1791 DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
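  // In the large memory model the address is instead materialized with a
  // MOVZ/MOVK chain, roughly (illustrative):
  //   movz x0, #:abs_g3:addr
  //   movk x0, #:abs_g2_nc:addr
  //   movk x0, #:abs_g1_nc:addr
  //   movk x0, #:abs_g0_nc:addr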
  case CodeModel::Large:
    return DAG.getNode(
        AArch64ISD::WrapperLarge, DL, PtrVT,
        DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
        DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
        DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
        DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}
1805 // (BRCOND chain, val, dest)
SDValue
AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
1810 SDValue TheBit = Op.getOperand(1);
1811 SDValue DestBB = Op.getOperand(2);
  // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
  // that as the consumer we are responsible for ignoring rubbish in higher
  // bits.
1816 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
1817 DAG.getConstant(1, MVT::i32));
1819 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
1820 DAG.getConstant(0, TheBit.getValueType()),
1821 DAG.getCondCode(ISD::SETNE));
1823 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
                     A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
                     DestBB);
}
1828 // (BR_CC chain, condcode, lhs, rhs, dest)
SDValue
AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
1833 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1834 SDValue LHS = Op.getOperand(2);
1835 SDValue RHS = Op.getOperand(3);
1836 SDValue DestBB = Op.getOperand(4);
1838 if (LHS.getValueType() == MVT::f128) {
1839 // f128 comparisons are lowered to runtime calls by a routine which sets
1840 // LHS, RHS and CC appropriately for the rest of this function to continue.
1841 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
1843 // If softenSetCCOperands returned a scalar, we need to compare the result
1844 // against zero to select between true and false values.
    if (RHS.getNode() == 0) {
      RHS = DAG.getConstant(0, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }
  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

1854 // Integers are handled in a separate function because the combinations of
1855 // immediates and tests can get hairy and we may want to fiddle things.
1856 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
1858 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                       Chain, CmpOp, A64cc, DestBB);
  }
1862 // Note that some LLVM floating-point CondCodes can't be lowered to a single
1863 // conditional branch, hence FPCCToA64CC can set a second test, where either
1864 // passing is sufficient.
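  // e.g. SETONE has no single A64 condition: FPCCToA64CC returns MI for the
  // "less than" half and GT as the alternative, so we emit two branches below.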
1865 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
1866 CondCode = FPCCToA64CC(CC, Alternative);
1867 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
1868 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1869 DAG.getCondCode(CC));
1870 SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1871 Chain, SetCC, A64cc, DestBB);
1873 if (Alternative != A64CC::Invalid) {
1874 A64cc = DAG.getConstant(Alternative, MVT::i32);
1875 A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                           A64BR_CC, SetCC, A64cc, DestBB);
  }

  return A64BR_CC;
}
SDValue
AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
                                       RTLIB::Libcall Call) const {
  ArgListTy Args;
  ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
1889 EVT ArgVT = Op.getOperand(i).getValueType();
1890 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i);
    Entry.Ty = ArgTy;
1892 Entry.isSExt = false;
1893 Entry.isZExt = false;
    Args.push_back(Entry);
  }
1896 SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());
1898 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
1900 // By default, the input chain to this libcall is the entry node of the
1901 // function. If the libcall is going to be emitted as a tail call then
1902 // isUsedByReturnOnly will change it to the right chain if the return
1903 // node which is being folded has a non-entry input chain.
1904 SDValue InChain = DAG.getEntryNode();
1906 // isTailCall may be true since the callee does not reference caller stack
1907 // frame. Check if it's in the right position.
1908 SDValue TCChain = InChain;
  bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
  if (isTailCall)
    InChain = TCChain;

1914 CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
1915 0, getLibcallCallingConv(Call), isTailCall,
1916 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
1917 Callee, Args, DAG, SDLoc(Op));
1918 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
1920 if (!CallInfo.second.getNode())
1921 // It's a tailcall, return the chain (which is the DAG root).
1922 return DAG.getRoot();
1924 return CallInfo.first;
SDValue
AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved
    return Op;
  }

  RTLIB::Libcall LC;
  LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
1937 SDValue SrcVal = Op.getOperand(0);
1938 return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
                     /*isSigned*/ false, SDLoc(Op)).first;
}
SDValue
AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");

  RTLIB::Libcall LC;
  LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}
SDValue
AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                      bool IsSigned) const {
  if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}
SDValue
AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
1970 MachineFunction &MF = DAG.getMachineFunction();
1971 MachineFrameInfo *MFI = MF.getFrameInfo();
1972 MFI->setReturnAddressIsTaken(true);
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(8, MVT::i64);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Return X30, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64);
}
SDValue
AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = AArch64::X29;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}
2009 AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
2010 SelectionDAG &DAG) const {
2011 assert(getTargetMachine().getCodeModel() == CodeModel::Large);
2012 assert(getTargetMachine().getRelocationModel() == Reloc::Static);
  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);
2016 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
2017 const GlobalValue *GV = GN->getGlobal();
2019 SDValue GlobalAddr = DAG.getNode(
2020 AArch64ISD::WrapperLarge, dl, PtrVT,
2021 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
2022 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
2023 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
2024 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
2026 if (GN->getOffset() != 0)
2027 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
                       DAG.getConstant(GN->getOffset(), PtrVT));

  return GlobalAddr;
}
SDValue
AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
2035 SelectionDAG &DAG) const {
2036 assert(getTargetMachine().getCodeModel() == CodeModel::Small);
  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);
2040 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
2041 const GlobalValue *GV = GN->getGlobal();
2042 unsigned Alignment = GV->getAlignment();
2043 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2044 if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
2045 // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
2046 // to zero when they remain undefined. In PIC mode the GOT can take care of
2047 // this, but in absolute mode we use a constant pool load.
    SDValue PoolAddr;
    PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2050 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
2051 AArch64II::MO_NO_FLAG),
2052 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
2053 AArch64II::MO_LO12),
2054 DAG.getConstant(8, MVT::i32));
2055 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
2056 MachinePointerInfo::getConstantPool(),
2057 /*isVolatile=*/ false,
2058 /*isNonTemporal=*/ true,
2059 /*isInvariant=*/ true, 8);
2060 if (GN->getOffset() != 0)
2061 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
                         DAG.getConstant(GN->getOffset(), PtrVT));

    return GlobalAddr;
  }
2067 if (Alignment == 0) {
2068 const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
2069 if (GVPtrTy->getElementType()->isSized()) {
2071 = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
2073 // Be conservative if we can't guess, not that it really matters:
2074 // functions and labels aren't valid for loads, and the methods used to
2075 // actually calculate an address work with any alignment.
2080 unsigned char HiFixup, LoFixup;
  bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);
  if (UseGOT) {
    HiFixup = AArch64II::MO_GOT;
    LoFixup = AArch64II::MO_GOT_LO12;
    Alignment = 8;
  } else {
    HiFixup = AArch64II::MO_NO_FLAG;
    LoFixup = AArch64II::MO_LO12;
  }
2092 // AArch64's small model demands the following sequence:
2093 // ADRP x0, somewhere
2094 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
  SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                             HiFixup),
                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                             LoFixup),
                                  DAG.getConstant(Alignment, MVT::i32));

  if (UseGOT) {
    GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
                            GlobalRef);
  }
2107 if (GN->getOffset() != 0)
2108 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
                       DAG.getConstant(GN->getOffset(), PtrVT));

  return GlobalRef;
}
SDValue
AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
2116 SelectionDAG &DAG) const {
2117 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
2118 // we make those distinctions here.
2120 switch (getTargetMachine().getCodeModel()) {
2121 case CodeModel::Small:
2122 return LowerGlobalAddressELFSmall(Op, DAG);
  case CodeModel::Large:
    return LowerGlobalAddressELFLarge(Op, DAG);
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}
SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
                                                SDValue DescAddr,
                                                SDLoc DL,
                                                SelectionDAG &DAG) const {
2134 EVT PtrVT = getPointerTy();
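  // The code built here corresponds to the standard TLS descriptor call
  // sequence, roughly (illustrative):
  //   adrp x0, :tlsdesc:var
  //   ldr  x1, [x0, #:tlsdesc_lo12:var]
  //   add  x0, x0, #:tlsdesc_lo12:var
  //   .tlsdesccall var
  //   blr  x1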
2136 // The function we need to call is simply the first entry in the GOT for this
2137 // descriptor, load it in preparation.
  SDValue Func, Chain;
  Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                     DescAddr);
  // The function takes only one argument: the address of the descriptor itself
  // in X0.
  SDValue Glue;
  Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
2146 Glue = Chain.getValue(1);
2148 // Finally, there's a special calling-convention which means that the lookup
2149 // must preserve all registers (except X0, obviously).
2150 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
2151 const AArch64RegisterInfo *A64RI
2152 = static_cast<const AArch64RegisterInfo *>(TRI);
2153 const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();
2155 // We're now ready to populate the argument list, as with a normal call:
2156 std::vector<SDValue> Ops;
2157 Ops.push_back(Chain);
2158 Ops.push_back(Func);
2159 Ops.push_back(SymAddr);
2160 Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
2161 Ops.push_back(DAG.getRegisterMask(Mask));
2162 Ops.push_back(Glue);
2164 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
                      Ops.size());
2167 Glue = Chain.getValue(1);
2169 // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it
2170 // back to the generic handling code.
  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
}
SDValue
AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
2176 SelectionDAG &DAG) const {
2177 assert(getSubtarget()->isTargetELF() &&
2178 "TLS not implemented for non-ELF targets");
2179 assert(getTargetMachine().getCodeModel() == CodeModel::Small
2180 && "TLS only supported in small memory model");
2181 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());

  SDValue TPOff;
  EVT PtrVT = getPointerTy();
  SDLoc DL(Op);
  const GlobalValue *GV = GA->getGlobal();

  SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
2192 if (Model == TLSModel::InitialExec) {
2193 TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2194 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2195 AArch64II::MO_GOTTPREL),
2196 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2197 AArch64II::MO_GOTTPREL_LO12),
2198 DAG.getConstant(8, MVT::i32));
    TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                        TPOff);
2201 } else if (Model == TLSModel::LocalExec) {
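    // A local-exec offset is a link-time constant, materialized straight into
    // a register, roughly (illustrative):
    //   movz x0, #:tprel_g1:var
    //   movk x0, #:tprel_g0_nc:var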
2202 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2203 AArch64II::MO_TPREL_G1);
2204 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2205 AArch64II::MO_TPREL_G0_NC);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(1, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, TPOff,
                                       LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
2212 } else if (Model == TLSModel::GeneralDynamic) {
2213 // Accesses used in this sequence go via the TLS descriptor which lives in
2214 // the GOT. Prepare an address we can use to handle this.
2215 SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2216 AArch64II::MO_TLSDESC);
2217 SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2218 AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
2222 SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
2224 TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2225 } else if (Model == TLSModel::LocalDynamic) {
2226 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
2227 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
    // the beginning of the module's TLS region, followed by a DTPREL offset
    // calculation.
2231 // These accesses will need deduplicating if there's more than one.
2232 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
2233 .getInfo<AArch64MachineFunctionInfo>();
2234 MFI->incNumLocalDynamicTLSAccesses();
2237 // Get the location of _TLS_MODULE_BASE_:
2238 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2239 AArch64II::MO_TLSDESC);
2240 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2241 AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
2245 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
2247 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2249 // Get the variable's offset from _TLS_MODULE_BASE_
2250 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2251 AArch64II::MO_DTPREL_G1);
2252 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2253 AArch64II::MO_DTPREL_G0_NC);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(1, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, TPOff,
                                       LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
2261 llvm_unreachable("Unsupported TLS access model");
  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
}
SDValue
AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                                      bool IsSigned) const {
  if (Op.getValueType() != MVT::f128) {
    // Legal for everything except f128.
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}
SDValue
AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy();
2291 // When compiling PIC, jump tables get put in the code section so a static
2292 // relocation-style is acceptable for both cases.
2293 switch (getTargetMachine().getCodeModel()) {
2294 case CodeModel::Small:
2295 return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2296 DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
2297 DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2298 AArch64II::MO_LO12),
2299 DAG.getConstant(1, MVT::i32));
  case CodeModel::Large:
    return DAG.getNode(
        AArch64ISD::WrapperLarge, dl, PtrVT,
        DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
        DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
        DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
        DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}
2312 // (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
SDValue
AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
2317 SDValue RHS = Op.getOperand(1);
2318 SDValue IfTrue = Op.getOperand(2);
2319 SDValue IfFalse = Op.getOperand(3);
2320 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2322 if (LHS.getValueType() == MVT::f128) {
2323 // f128 comparisons are lowered to libcalls, but slot in nicely here
2325 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2327 // If softenSetCCOperands returned a scalar, we need to compare the result
2328 // against zero to select between true and false values.
    if (RHS.getNode() == 0) {
      RHS = DAG.getConstant(0, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }
  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

2338 // Integers are handled in a separate function because the combinations of
2339 // immediates and tests can get hairy and we may want to fiddle things.
2340 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2342 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
                       CmpOp, IfTrue, IfFalse, A64cc);
  }
2346 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2347 // conditional branch, hence FPCCToA64CC can set a second test, where either
2348 // passing is sufficient.
2349 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2350 CondCode = FPCCToA64CC(CC, Alternative);
2351 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2352 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2353 DAG.getCondCode(CC));
2354 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
2356 SetCC, IfTrue, IfFalse, A64cc);
2358 if (Alternative != A64CC::Invalid) {
2359 A64cc = DAG.getConstant(Alternative, MVT::i32);
2360 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
                               SetCC, IfTrue, A64SELECT_CC, A64cc);
  }

  return A64SELECT_CC;
}
2368 // (SELECT testbit, iftrue, iffalse)
SDValue
AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue TheBit = Op.getOperand(0);
2373 SDValue IfTrue = Op.getOperand(1);
2374 SDValue IfFalse = Op.getOperand(2);
  // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
  // that as the consumer we are responsible for ignoring rubbish in higher
  // bits.
2379 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
2380 DAG.getConstant(1, MVT::i32));
2381 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
2382 DAG.getConstant(0, TheBit.getValueType()),
2383 DAG.getCondCode(ISD::SETNE));
2385 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2386 A64CMP, IfTrue, IfFalse,
                     DAG.getConstant(A64CC::NE, MVT::i32));
}
static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  EVT VT = Op.getValueType();
  bool Invert = false;
  SDValue Op0, Op1;
  unsigned Opcode;

  if (LHS.getValueType().isInteger()) {
2402 // Attempt to use Vector Integer Compare Mask Test instruction.
2403 // TST = icmp ne (and (op0, op1), zero).
2404 if (CC == ISD::SETNE) {
2405 if (((LHS.getOpcode() == ISD::AND) &&
2406 ISD::isBuildVectorAllZeros(RHS.getNode())) ||
2407 ((RHS.getOpcode() == ISD::AND) &&
2408 ISD::isBuildVectorAllZeros(LHS.getNode()))) {
2410 SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS;
2411 SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0));
2412 SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1));
        return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS);
      }
    }
2417 // Attempt to use Vector Integer Compare Mask against Zero instr (Signed).
2418 // Note: Compare against Zero does not support unsigned predicates.
2419 if ((ISD::isBuildVectorAllZeros(RHS.getNode()) ||
2420 ISD::isBuildVectorAllZeros(LHS.getNode())) &&
2421 !isUnsignedIntSetCC(CC)) {
      // If LHS is the zero value, swap operands and CondCode.
      if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
        CC = getSetCCSwappedOperands(CC);
        std::swap(LHS, RHS);
      }
      Op0 = LHS;

      // Ensure valid CondCode for Compare Mask against Zero instruction:
      // EQ, GE, GT, LE, LT.
      if (ISD::SETNE == CC) {
        Invert = true;
        CC = ISD::SETEQ;
      }

      // Using constant type to differentiate integer and FP compares with zero.
      Op1 = DAG.getConstant(0, MVT::i32);
      Opcode = AArch64ISD::NEON_CMPZ;
    } else {
      // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
      // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
      bool Swap = false;
      switch (CC) {
      default:
        llvm_unreachable("Illegal integer comparison.");
      case ISD::SETNE:
        Invert = true;
        CC = ISD::SETEQ;
        break;
      case ISD::SETEQ:
      case ISD::SETGT:
      case ISD::SETGE:
      case ISD::SETUGT:
      case ISD::SETUGE:
        break;
      case ISD::SETLT:
      case ISD::SETLE:
      case ISD::SETULT:
      case ISD::SETULE:
        // LT/LE comparisons become GT/GE with the operands swapped, since only
        // the latter have Compare Mask encodings.
        Swap = true;
        CC = getSetCCSwappedOperands(CC);
      }
      if (Swap)
        std::swap(LHS, RHS);

      Op0 = LHS;
      Op1 = RHS;
      Opcode = AArch64ISD::NEON_CMP;
    }
    // Generate Compare Mask instr or Compare Mask against Zero instr.
    SDValue NeonCmp =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));

    if (Invert)
      NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

    return NeonCmp;
  }
2484 // Now handle Floating Point cases.
2485 // Attempt to use Vector Floating Point Compare Mask against Zero instruction.
2486 if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
2487 ISD::isBuildVectorAllZeros(LHS.getNode())) {
    // If LHS is the zero value, swap operands and CondCode.
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      CC = getSetCCSwappedOperands(CC);
      std::swap(LHS, RHS);
    }
    Op0 = LHS;

    // Using constant type to differentiate integer and FP compares with zero.
    Op1 = DAG.getConstantFP(0, MVT::f32);
    Opcode = AArch64ISD::NEON_CMPZ;
  } else {
    // Attempt to use Vector Floating Point Compare Mask instruction.
    Op0 = LHS;
    Op1 = RHS;
    Opcode = AArch64ISD::NEON_CMP;
  }
2507 // Some register compares have to be implemented with swapped CC and operands,
2508 // e.g.: OLT implemented as OGT with swapped operands.
  bool SwapIfRegArgs = false;
  SDValue NeonCmpAlt;
2511 // Ensure valid CondCode for FP Compare Mask against Zero instruction:
2512 // EQ, GE, GT, LE, LT.
2513 // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
2516 llvm_unreachable("Illegal FP comparison");
2519 Invert = true; // Fallthrough
2527 SwapIfRegArgs = true;
2536 SwapIfRegArgs = true;
2545 SwapIfRegArgs = true;
2554 SwapIfRegArgs = true;
2561 Invert = true; // Fallthrough
2563 // Expand this to (OGT |OLT).
2565 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
2567 SwapIfRegArgs = true;
2570 Invert = true; // Fallthrough
2572 // Expand this to (OGE | OLT).
2574 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
2576 SwapIfRegArgs = true;
2580 if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
2581 CC = getSetCCSwappedOperands(CC);
    std::swap(Op0, Op1);
  }
2585 // Generate FP Compare Mask instr or FP Compare Mask against Zero instr
2586 SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
2588 if (NeonCmpAlt.getNode())
2589 NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);
  if (Invert)
    NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

  return NeonCmp;
}
2597 // (SETCC lhs, rhs, condcode)
SDValue
AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
2602 SDValue RHS = Op.getOperand(1);
2603 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  EVT VT = Op.getValueType();

  if (VT.isVector())
    return LowerVectorSETCC(Op, DAG);
2609 if (LHS.getValueType() == MVT::f128) {
2610 // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
2611 // for the rest of the function (some i32 or i64 values).
2612 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2614 // If softenSetCCOperands returned a scalar, use it.
    if (RHS.getNode() == 0) {
      assert(LHS.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      return LHS;
    }
  }
  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

2625 // Integers are handled in a separate function because the combinations of
2626 // immediates and tests can get hairy and we may want to fiddle things.
2627 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2629 return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
                       CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       A64cc);
  }
2634 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2635 // conditional branch, hence FPCCToA64CC can set a second test, where either
2636 // passing is sufficient.
2637 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2638 CondCode = FPCCToA64CC(CC, Alternative);
2639 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2640 SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2641 DAG.getCondCode(CC));
2642 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2643 CmpOp, DAG.getConstant(1, VT),
2644 DAG.getConstant(0, VT), A64cc);
2646 if (Alternative != A64CC::Invalid) {
2647 A64cc = DAG.getConstant(Alternative, MVT::i32);
2648 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
                               DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
  }

  return A64SELECT_CC;
}
SDValue
AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2660 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
2661 // rather than just 8.
2662 return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
2663 Op.getOperand(1), Op.getOperand(2),
2664 DAG.getConstant(32, MVT::i32), 8, false, false,
                       MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
}
SDValue
AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2670 // The layout of the va_list struct is specified in the AArch64 Procedure Call
2671 // Standard, section B.3.
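  // In C terms, the structure being initialized below is (per the AAPCS64):
  //   struct va_list {
  //     void *__stack;   // offset 0:  next stack argument
  //     void *__gr_top;  // offset 8:  end of the GP register save area
  //     void *__vr_top;  // offset 16: end of the FP/SIMD register save area
  //     int __gr_offs;   // offset 24: negative offset to first saved GPR
  //     int __vr_offs;   // offset 28: negative offset to first saved FPR
  //   };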
2672 MachineFunction &MF = DAG.getMachineFunction();
2673 AArch64MachineFunctionInfo *FuncInfo
2674 = MF.getInfo<AArch64MachineFunctionInfo>();
2677 SDValue Chain = Op.getOperand(0);
2678 SDValue VAList = Op.getOperand(1);
2679 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2680 SmallVector<SDValue, 4> MemOps;
2682 // void *__stack at offset 0
  SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
                                    getPointerTy());
2685 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
2686 MachinePointerInfo(SV), false, false, 0));
2688 // void *__gr_top at offset 8
  int GPRSize = FuncInfo->getVariadicGPRSize();
  if (GPRSize > 0) {
    SDValue GRTop, GRTopAddr;

    GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                            DAG.getConstant(8, getPointerTy()));

    GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
    GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
                        DAG.getConstant(GPRSize, getPointerTy()));

    MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
                                  MachinePointerInfo(SV, 8),
                                  false, false, 0));
  }
2705 // void *__vr_top at offset 16
  int FPRSize = FuncInfo->getVariadicFPRSize();
  if (FPRSize > 0) {
    SDValue VRTop, VRTopAddr;
    VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                            DAG.getConstant(16, getPointerTy()));

    VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
    VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
                        DAG.getConstant(FPRSize, getPointerTy()));

    MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
                                  MachinePointerInfo(SV, 16),
                                  false, false, 0));
  }
  // int __gr_offs at offset 24
  SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                                   DAG.getConstant(24, getPointerTy()));
  MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
                                GROffsAddr, MachinePointerInfo(SV, 24),
                                false, false, 0));

  // int __vr_offs at offset 28
  SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                                   DAG.getConstant(28, getPointerTy()));
  MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
                                VROffsAddr, MachinePointerInfo(SV, 28),
                                false, false, 0));

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                     MemOps.size());
}
SDValue
AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2741 switch (Op.getOpcode()) {
2742 default: llvm_unreachable("Don't know how to custom lower this!");
2743 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
2744 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
2745 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
2746 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
2747 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
2748 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
2749 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
2750 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
2751 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
2752 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
2753 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
2754 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
2756 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
2757 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
2758 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
2759 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
2760 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
2761 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
2762 case ISD::SELECT: return LowerSELECT(Op, DAG);
2763 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
2764 case ISD::SETCC: return LowerSETCC(Op, DAG);
2765 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
2766 case ISD::VASTART: return LowerVASTART(Op, DAG);
2767 case ISD::BUILD_VECTOR:
2768 return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  }

  return SDValue();
}
2775 /// Check if the specified splat value corresponds to a valid vector constant
2776 /// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If
/// so, return the encoded 8-bit immediate and the OpCmode instruction fields
/// for the MOV immediate instruction.
2779 static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
2780 unsigned SplatBitSize, SelectionDAG &DAG,
2781 bool is128Bits, NeonModImmType type, EVT &VT,
2782 unsigned &Imm, unsigned &OpCmode) {
  switch (SplatBitSize) {
  default:
    llvm_unreachable("unexpected size for isNeonModifiedImm");
  case 8: {
    if (type != Neon_Mov_Imm)
      return false;
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    // Neon movi per byte: Op=0, Cmode=1110.
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;
  }
  case 16: {
    // Neon move inst per halfword
2798 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
2799 if ((SplatBits & ~0xff) == 0) {
2800 // Value = 0x00nn is 0x00nn LSL 0
2801 // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
2802 // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001
2808 if ((SplatBits & ~0xff00) == 0) {
2809 // Value = 0xnn00 is 0x00nn LSL 8
2810 // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
2811 // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011
2813 Imm = SplatBits >> 8;
    // can't handle any other
    return false;
  }
  case 32: {
2822 // First the LSL variants (MSL is unusable by some interested instructions).
2824 // Neon move instr per word, shift zeros
2825 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
2826 if ((SplatBits & ~0xff) == 0) {
2827 // Value = 0x000000nn is 0x000000nn LSL 0
2828 // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
2829 // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001
2835 if ((SplatBits & ~0xff00) == 0) {
2836 // Value = 0x0000nn00 is 0x000000nn LSL 8
2837 // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
2838 // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011
2840 Imm = SplatBits >> 8;
2844 if ((SplatBits & ~0xff0000) == 0) {
2845 // Value = 0x00nn0000 is 0x000000nn LSL 16
2846 // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
2847 // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101
2849 Imm = SplatBits >> 16;
2853 if ((SplatBits & ~0xff000000) == 0) {
2854 // Value = 0xnn000000 is 0x000000nn LSL 24
2855 // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
2856 // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111
2858 Imm = SplatBits >> 24;
2863 // Now the MSL immediates.
2865 // Neon move instr per word, shift ones
2866 if ((SplatBits & ~0xffff) == 0 &&
2867 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
2868 // Value = 0x0000nnff is 0x000000nn MSL 8
2869 // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
2871 Imm = SplatBits >> 8;
2875 if ((SplatBits & ~0xffffff) == 0 &&
2876 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
2877 // Value = 0x00nnffff is 0x000000nn MSL 16
2878 // movi: Op=1, Cmode= 1101; mvni: Op=1, Cmode= 1101
2880 Imm = SplatBits >> 16;
    // can't handle any other
    return false;
  }
  case 64: {
    if (type != Neon_Mov_Imm)
      return false;
2891 // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
2892 // movi Op=1, Cmode=1110.
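    // e.g. (illustrative) the 64-bit splat 0xffff00000000ffff is encodable:
    // bytes 0, 1, 6 and 7 are 0xff, so Imm ends up as 0b11000011.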
    OpCmode = 0x1e;
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    unsigned ImmMask = 1;
    Imm = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
        Val |= BitMask;
        Imm |= ImmMask;
      } else if ((SplatBits & BitMask) != 0) {
        return false;
      }
      BitMask <<= 8;
      ImmMask <<= 1;
    }
    SplatBits = Val;
    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
    break;
  }
  }

  return true;
}
2917 static SDValue PerformANDCombine(SDNode *N,
2918 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  // We're looking for an SRL/AND pair which together form an unsigned
  // bitfield extract (UBFX).
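  // e.g. (and (srl X, #3), 0x1f) extracts bits [7:3] of X; the UBFX node
  // built below takes that field's LSB (3) and MSB (7) as immediates.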
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  uint64_t TruncMask = N->getConstantOperandVal(1);
  if (!isMask_64(TruncMask))
    return SDValue();

  uint64_t Width = CountPopulation_64(TruncMask);
  SDValue Shift = N->getOperand(0);

  if (Shift.getOpcode() != ISD::SRL)
    return SDValue();

  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
    return SDValue();
  uint64_t LSB = Shift->getConstantOperandVal(1);

  if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
    return SDValue();

  return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
                     DAG.getConstant(LSB, MVT::i64),
                     DAG.getConstant(LSB + Width - 1, MVT::i64));
}
2954 /// For a true bitfield insert, the bits getting into that contiguous mask
2955 /// should come from the low part of an existing value: they must be formed from
2956 /// a compatible SHL operation (unless they're already low). This function
2957 /// checks that condition and returns the least-significant bit that's
/// intended. If the operation is not a field preparation, -1 is returned.
2959 static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
2960 SDValue &MaskedVal, uint64_t Mask) {
  if (!isShiftedMask_64(Mask))
    return -1;
2964 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
2965 // instruction. BFI will do a left-shift by LSB before applying the mask we've
2966 // spotted, so in general we should pre-emptively "undo" that by making sure
2967 // the incoming bits have had a right-shift applied to them.
2969 // This right shift, however, will combine with existing left/right shifts. In
2970 // the simplest case of a completely straight bitfield operation, it will be
2971 // expected to completely cancel out with an existing SHL. More complicated
  // cases (e.g. bitfield to bitfield copy) may still need a real shift before
  // the BFI.
2975 uint64_t LSB = countTrailingZeros(Mask);
2976 int64_t ShiftRightRequired = LSB;
2977 if (MaskedVal.getOpcode() == ISD::SHL &&
2978 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
2979 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
2980 MaskedVal = MaskedVal.getOperand(0);
2981 } else if (MaskedVal.getOpcode() == ISD::SRL &&
2982 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
2983 ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
2984 MaskedVal = MaskedVal.getOperand(0);
2987 if (ShiftRightRequired > 0)
2988 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
2989 DAG.getConstant(ShiftRightRequired, MVT::i64));
2990 else if (ShiftRightRequired < 0) {
2991 // We could actually end up with a residual left shift, for example with
2992 // "struc.bitfield = val << 1".
    MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
                            DAG.getConstant(-ShiftRightRequired, MVT::i64));
  }

  return LSB;
}
3000 /// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
3001 /// a mask and an extension. Returns true if a BFI was found and provides
3002 /// information on its surroundings.
static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
                          bool &Extended) {
  if (N.getOpcode() == ISD::ZERO_EXTEND) {
    Extended = true;
    N = N.getOperand(0);
  } else {
    Extended = false;
  }

  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
    Mask = N->getConstantOperandVal(1);
    N = N.getOperand(0);
  } else {
    // Mask is the whole width.
    Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
  }

  if (N.getOpcode() == AArch64ISD::BFI) {
    BFI = N;
    return true;
  }

  return false;
}
3027 /// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
3028 /// is roughly equivalent to (and (BFI ...), mask). This form is used because it
3029 /// can often be further combined with a larger mask. Ultimately, we want mask
3030 /// to be 2^32-1 or 2^64-1 so the AND can be skipped.
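/// For instance (illustrative), (or (and X, 0xffff0000), (and Y, 0xffff))
/// becomes a single BFI inserting one value's 16-bit field into the other;
/// the two masks together cover the whole register, so no residual AND is
/// needed.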
3031 static SDValue tryCombineToBFI(SDNode *N,
3032 TargetLowering::DAGCombinerInfo &DCI,
3033 const AArch64Subtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
3038 assert(N->getOpcode() == ISD::OR && "Unexpected root");
3040 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
3041 // abandon the effort.
  SDValue LHS = N->getOperand(0);
  if (LHS.getOpcode() != ISD::AND)
    return SDValue();

  uint64_t LHSMask;
  if (isa<ConstantSDNode>(LHS.getOperand(1)))
    LHSMask = LHS->getConstantOperandVal(1);
  else
    return SDValue();

  // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
  // is or abandon the effort.
  SDValue RHS = N->getOperand(1);
  if (RHS.getOpcode() != ISD::AND)
    return SDValue();

  uint64_t RHSMask;
  if (isa<ConstantSDNode>(RHS.getOperand(1)))
    RHSMask = RHS->getConstantOperandVal(1);
  else
    return SDValue();
3064 // Can't do anything if the masks are incompatible.
  if (LHSMask & RHSMask)
    return SDValue();
3068 // Now we need one of the masks to be a contiguous field. Without loss of
3069 // generality that should be the RHS one.
3070 SDValue Bitfield = LHS.getOperand(0);
3071 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
    // We know that LHS is a candidate new value, and RHS isn't already a better
    // one.
3074 std::swap(LHS, RHS);
3075 std::swap(LHSMask, RHSMask);
3078 // We've done our best to put the right operands in the right places, all we
3079 // can do now is check whether a BFI exists.
3080 Bitfield = RHS.getOperand(0);
  int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
  if (LSB == -1)
    return SDValue();
3085 uint32_t Width = CountPopulation_64(RHSMask);
3086 assert(Width && "Expected non-zero bitfield width");
3088 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3089 LHS.getOperand(0), Bitfield,
3090 DAG.getConstant(LSB, MVT::i64),
3091 DAG.getConstant(Width, MVT::i64));
  if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
    return BFI;
3097 return DAG.getNode(ISD::AND, DL, VT, BFI,
3098 DAG.getConstant(LHSMask | RHSMask, VT));
3101 /// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
3102 /// original input. This is surprisingly common because SROA splits things up
3103 /// into i8 chunks, so the originally detected MaskedBFI may actually only act
3104 /// on the low (say) byte of a word. This is then orred into the rest of the
3105 /// word afterwards.
3107 /// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
3109 /// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
/// MaskedBFI. We can also deal with a certain amount of extend/truncate being
/// involved.
3112 static SDValue tryCombineToLargerBFI(SDNode *N,
3113 TargetLowering::DAGCombinerInfo &DCI,
3114 const AArch64Subtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
3119 // First job is to hunt for a MaskedBFI on either the left or right. Swap
3120 // operands if it's actually on the right.
3122 SDValue PossExtraMask;
3123 uint64_t ExistingMask = 0;
3124 bool Extended = false;
3125 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
3126 PossExtraMask = N->getOperand(1);
  else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
    PossExtraMask = N->getOperand(0);
  else
    return SDValue();
3132 // We can only combine a BFI with another compatible mask.
  if (PossExtraMask.getOpcode() != ISD::AND ||
      !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
    return SDValue();
3137 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
3139 // Masks must be compatible.
  if (ExtraMask & ExistingMask)
    return SDValue();
3143 SDValue OldBFIVal = BFI.getOperand(0);
3144 SDValue NewBFIVal = BFI.getOperand(1);
3146 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
3147 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
3148 // need to be made compatible.
3149 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
3150 && "Invalid types for BFI");
3151 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
3152 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
3155 // We need the MaskedBFI to be combined with a mask of the *same* value.
  if (PossExtraMask.getOperand(0) != OldBFIVal)
    return SDValue();
3159 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3160 OldBFIVal, NewBFIVal,
3161 BFI.getOperand(2), BFI.getOperand(3));
3163 // If the masking is trivial, we don't need to create it.
  if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
    return BFI;
3167 return DAG.getNode(ISD::AND, DL, VT, BFI,
3168 DAG.getConstant(ExtraMask | ExistingMask, VT));
3171 /// An EXTR instruction is made up of two shifts, ORed together. This helper
3172 /// searches for and classifies those shifts.
static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
                         bool &FromHi) {
  if (N.getOpcode() == ISD::SHL)
    FromHi = false;
  else if (N.getOpcode() == ISD::SRL)
    FromHi = true;
  else
    return false;

  if (!isa<ConstantSDNode>(N.getOperand(1)))
    return false;

  ShiftAmount = N->getConstantOperandVal(1);
  Src = N->getOperand(0);
  return true;
}
3190 /// EXTR instruction extracts a contiguous chunk of bits from two existing
3191 /// registers viewed as a high/low pair. This function looks for the pattern:
3192 /// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
/// EXTR. Can't quite be done in TableGen because the two immediates aren't
/// independent.
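/// For example (illustrative), with i32 operands:
/// (or (shl X, #24), (srl Y, #8)) takes 32 bits of the X:Y concatenation
/// starting at bit 8 of Y, i.e. EXTR X, Y, #8.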
3195 static SDValue tryCombineToEXTR(SDNode *N,
3196 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
3201 assert(N->getOpcode() == ISD::OR && "Unexpected root");
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  SDValue LHS;
  uint32_t ShiftLHS = 0;
  bool LHSFromHi = false;
  if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
    return SDValue();

  SDValue RHS;
  uint32_t ShiftRHS = 0;
  bool RHSFromHi = false;
  if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
    return SDValue();

  // If they're both trying to come from the high part of the register, they're
  // not really an EXTR.
  if (LHSFromHi == RHSFromHi)
    return SDValue();

  if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
    return SDValue();

  if (LHSFromHi) {
    std::swap(LHS, RHS);
    std::swap(ShiftLHS, ShiftRHS);
  }

  return DAG.getNode(AArch64ISD::EXTR, DL, VT,
                     LHS, RHS,
                     DAG.getConstant(ShiftRHS, MVT::i64));
}
3236 /// Target-specific dag combine xforms for ISD::OR
3237 static SDValue PerformORCombine(SDNode *N,
3238 TargetLowering::DAGCombinerInfo &DCI,
3239 const AArch64Subtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
3243 EVT VT = N->getValueType(0);
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();
  // Attempt to recognise bitfield-insert operations.
  SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
  if (Res.getNode())
    return Res;

  // Attempt to combine an existing MaskedBFI operation into one with a larger
  // mask.
  Res = tryCombineToLargerBFI(N, DCI, Subtarget);
  if (Res.getNode())
    return Res;

  Res = tryCombineToEXTR(N, DCI);
  if (Res.getNode())
    return Res;
  if (!Subtarget->hasNEON())
    return SDValue();
3266 // Attempt to use vector immediate-form BSL
3267 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
  SDValue N0 = N->getOperand(0);
  if (N0.getOpcode() != ISD::AND)
    return SDValue();

  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() != ISD::AND)
    return SDValue();
  if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
    APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;

    APInt SplatBits0;
    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) && !HasAnyUndefs) {
      APInt SplatBits1;
      BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                        HasAnyUndefs) &&
          !HasAnyUndefs && SplatBits0 == ~SplatBits1) {
        // Canonicalize the vector type to make instruction selection simpler.
        EVT CanonicalVT = VT.is128BitVector() ? MVT::v16i8 : MVT::v8i8;
        SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT,
                                     N0->getOperand(1), N0->getOperand(0),
                                     N1->getOperand(0));
        return DAG.getNode(ISD::BITCAST, DL, VT, Result);
      }
    }
  }

  return SDValue();
}
3304 /// Target-specific dag combine xforms for ISD::SRA
static SDValue PerformSRACombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
3312 // We're looking for an SRA/SHL pair which form an SBFX.
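  // e.g. (sra (shl X, #8), #16) on i32 sign-extends bits [23:8] of X, so it
  // becomes an SBFX whose immediates below are that field's LSB 8 and MSB 23.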
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  uint64_t ExtraSignBits = N->getConstantOperandVal(1);
  SDValue Shift = N->getOperand(0);

  if (Shift.getOpcode() != ISD::SHL)
    return SDValue();

  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
    return SDValue();
  uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
  uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
  uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;

  if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
    return SDValue();

  return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
                     DAG.getConstant(LSB, MVT::i64),
                     DAG.getConstant(LSB + Width - 1, MVT::i64));
}
3341 /// Check if this is a valid build_vector for the immediate operand of
3342 /// a vector shift operation, where all the elements of the build_vector
3343 /// must have the same constant integer value.
3344 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
3345 // Ignore bit_converts.
3346 while (Op.getOpcode() == ISD::BITCAST)
3347 Op = Op.getOperand(0);
3348 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3349 APInt SplatBits, SplatUndef;
3350 unsigned SplatBitSize;
3352 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
3353 HasAnyUndefs, ElementBits) ||
3354 SplatBitSize > ElementBits)
3356 Cnt = SplatBits.getSExtValue();
3360 /// Check if this is a valid build_vector for the immediate operand of
3361 /// a vector shift left operation. That value must be in the range:
3362 /// 0 <= Value < ElementBits
3363 static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
3364 assert(VT.isVector() && "vector shift count is not a vector type");
3365 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && Cnt < ElementBits);
}
3371 /// Check if this is a valid build_vector for the immediate operand of a
3372 /// vector shift right operation. The value must be in the range:
3373 /// 1 <= Value <= ElementBits
static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 1 && Cnt <= ElementBits);
}

/// Checks for immediate versions of vector shifts and lowers them.
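/// For example, (shl v4i32 x, <3, 3, 3, 3>) has an immediate-form count, so
/// the count is re-materialized as a NEON_VDUP of the scalar 3, which the
/// shift-by-immediate patterns can then match.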
static SDValue PerformShiftCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const AArch64Subtarget *ST) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
    return PerformSRACombine(N, DCI);

  // Nothing to be done for scalar shifts.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!VT.isVector() || !TLI.isTypeLegal(VT))
    return SDValue();

  assert(ST->hasNEON() && "unexpected vector shift");
  int64_t Cnt;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
      SDValue RHS =
          DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
                      DAG.getConstant(Cnt, MVT::i32));
      return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
    }
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
      SDValue RHS =
          DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
                      DAG.getConstant(Cnt, MVT::i32));
      return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
    }
    break;
  }

  return SDValue();
}

/// AArch64-specific DAG combining for intrinsics.
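/// For example, (int_arm_neon_vqshifts x, <2, 2, 2, 2>) becomes
/// NEON_QSHLs(x, 2), the immediate form of the signed saturating shift left.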
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();

  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
      break;
    unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
                             ? AArch64ISD::NEON_QSHLs
                             : AArch64ISD::NEON_QSHLu;
    return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
  }
  }

  return SDValue();
}

SDValue
AArch64TargetLowering::PerformDAGCombine(SDNode *N,
                                         DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::AND: return PerformANDCombine(N, DCI);
  case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return PerformShiftCombine(N, DCI, getSubtarget());
  case ISD::INTRINSIC_WO_CHAIN:
    return PerformIntrinsicCombine(N, DCI.DAG);
  }
  return SDValue();
}

bool
AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue
AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                         const AArch64Subtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  bool UseNeonMov = VT.getSizeInBits() >= 64;

  // Note we favor lowering MOVI over MVNI.
  // This has implications on the definition of patterns in TableGen to select
  // BIC immediate instructions but not ORR immediate instructions.
  // If this lowering order is changed, TableGen patterns for BIC immediate and
  // ORR immediate instructions have to be updated.
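  // For example, a v4i32 splat of 0x00FF0000 is directly encodable as MOVI
  // (8-bit immediate, LSL #16); a splat of 0xFFFF00FF is not, but its
  // complement 0x0000FF00 is, so that one is materialized with MVNI.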
  if (UseNeonMov &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      // First attempt to use vector immediate-form MOVI.
      EVT NeonMovVT;
      unsigned Imm = 0;
      unsigned OpCmode = 0;

      if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                            SplatBitSize, DAG, VT.is128BitVector(),
                            Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
        SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
        SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);

        if (ImmVal.getNode() && OpCmodeVal.getNode()) {
          SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
                                        ImmVal, OpCmodeVal);
          return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
        }
      }

      // Then attempt to use vector immediate-form MVNI.
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
                            DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
                            Imm, OpCmode)) {
        SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
        SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
        if (ImmVal.getNode() && OpCmodeVal.getNode()) {
          SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
                                        ImmVal, OpCmodeVal);
          return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
        }
      }

      // Attempt to use vector immediate-form FMOV.
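      // e.g. a v4f32 splat of 1.0f or -0.5f fits the 8-bit floating-point
      // immediate encoding and becomes a single FMOV.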
      if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
          (VT == MVT::v2f64 && SplatBitSize == 64)) {
        APFloat RealVal(
            SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
            SplatBits);
        uint32_t ImmVal;
        if (A64Imms::isFPImm(RealVal, ImmVal)) {
          SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
          return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
        }
      }
    }
  }

  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool hasDominantValue = false;
  bool isConstant = true;

  // Map of the number of times a particular SDValue appears in the
  // element list.
  DenseMap<SDValue, unsigned> ValueCounts;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.getOpcode() == ISD::UNDEF)
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    // Is this value dominant? (takes up more than half of the lanes)
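    // e.g. in (a, a, a, b) the value 'a' covers 3 of 4 lanes, so it is
    // dominant and becomes the splat candidate.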
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;
      Value = V;
    }
  }
  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  if (!Value.getNode() && ValueCounts.size() > 0)
    Value = ValueCounts.begin()->first;

  if (ValueCounts.size() == 0)
    return DAG.getUNDEF(VT);

  // Loads are better lowered with insert_vector_elt, so keep going in that
  // case instead of taking this shortcut.
  if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  // Use VDUP for non-constant splats.
  if (hasDominantValue && EltSize <= 64) {
    if (!isConstant) {
      SDValue N;

      // If we are DUPing a value that comes directly from a vector, we could
      // just use DUPLANE. We can only do this if the lane being extracted
      // is at a constant index, as the DUP from lane instructions only have
      // constant-index forms.
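      // e.g. splatting lane 1 of a v4i32 %vec becomes NEON_VDUPLANE(%vec, 1)
      // instead of extracting to a GPR and duplicating from there.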
      if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          isa<ConstantSDNode>(Value->getOperand(1))) {
        N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT,
                        Value->getOperand(0), Value->getOperand(1));
      } else
        N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);

      if (!usesOnlyOneValue) {
        // The dominant value was splatted as 'N', but we now have to insert
        // all differing elements.
        for (unsigned I = 0; I < NumElts; ++I) {
          if (Op.getOperand(I) == Value)
            continue;
          SmallVector<SDValue, 3> Ops;
          Ops.push_back(N);
          Ops.push_back(Op.getOperand(I));
          Ops.push_back(DAG.getConstant(I, MVT::i32));
          N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3);
        }
      }
      return N;
    }
    if (usesOnlyOneValue && isConstant) {
      return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
    }
  }

  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target) and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    SDValue Vec = DAG.getUNDEF(VT);
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.getOpcode() == ISD::UNDEF)
        continue;
      SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }

  return SDValue();
}

SDValue
AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection. This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  ArrayRef<int> ShuffleMask = SVN->getMask();

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  if (EltSize <= 64) {
    if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
      int Lane = SVN->getSplatIndex();
      // If this is an undef splat, generate it via "just" vdup, if possible.
      if (Lane == -1) Lane = 0;

      // Test if V1 is a SCALAR_TO_VECTOR.
      if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
      }
      // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
      if (V1.getOpcode() == ISD::BUILD_VECTOR) {
        bool IsScalarToVector = true;
        for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
          if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
              i != (unsigned)Lane) {
            IsScalarToVector = false;
            break;
          }
        if (IsScalarToVector)
          return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
                             V1.getOperand(Lane));
      }
      return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
                         DAG.getConstant(Lane, MVT::i64));
    }

    // For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
    // insert by element from V2 into V1.
    // If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be
    // the better choice to insert into, as fewer inserts are needed; so we
    // count the elements to be inserted for both V1 and V2, and pick the one
    // needing fewer inserts as the insertion target.
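    // For instance, with mask "0, 1, 2, 3, 4, 5, 13, 7" only lane 6 differs
    // from V1's identity mask, so a single insert of V2's element 5 (13 - 8)
    // into lane 6 of V1 suffices.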

    // Collect the elements needing to be inserted and their index.
    SmallVector<int, 8> NV1Elt;
    SmallVector<int, 8> N1Index;
    SmallVector<int, 8> NV2Elt;
    SmallVector<int, 8> N2Index;
    int Length = ShuffleMask.size();
    int V1EltNum = V1.getValueType().getVectorNumElements();
    for (int I = 0; I != Length; ++I) {
      if (ShuffleMask[I] != I) {
        NV1Elt.push_back(ShuffleMask[I]);
        N1Index.push_back(I);
      }
    }
    for (int I = 0; I != Length; ++I) {
      if (ShuffleMask[I] != (I + V1EltNum)) {
        NV2Elt.push_back(ShuffleMask[I]);
        N2Index.push_back(I);
      }
    }

    // Decide which to be inserted. If all lanes mismatch, neither V1 nor V2
    // will be inserted.
    SDValue InsV = V1;
    SmallVector<int, 8> InsMasks = NV1Elt;
    SmallVector<int, 8> InsIndex = N1Index;
    if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
      if (NV1Elt.size() > NV2Elt.size()) {
        InsV = V2;
        InsMasks = NV2Elt;
        InsIndex = N2Index;
      }
    } else {
      InsV = DAG.getNode(ISD::UNDEF, dl, VT);
    }

    SDValue PassN;

    for (int I = 0, E = InsMasks.size(); I != E; ++I) {
      SDValue ExtV = V1;
      int Mask = InsMasks[I];
      if (Mask >= V1EltNum) {
        ExtV = V2;
        Mask -= V1EltNum;
      }
      // Any value type smaller than i32 is illegal in AArch64, and this lower
      // function is called after the legalize pass, so we need to legalize
      // the extracted value's type here.
      EVT EltVT;
      if (VT.getVectorElementType().isFloatingPoint())
        EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
      else
        EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32;

      PassN = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
                          DAG.getConstant(Mask, MVT::i64));
      PassN = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, PassN,
                          DAG.getConstant(InsIndex[I], MVT::i64));
      InsV = PassN;
    }

    return InsV;
  }

  return Op;
}

AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'w': // An FP/SIMD vector register
      return C_RegisterClass;
    case 'I': // Constant that can be used with an ADD instruction
    case 'J': // Constant that can be used with a SUB instruction
    case 'K': // Constant that can be used with a 32-bit logical instruction
    case 'L': // Constant that can be used with a 64-bit logical instruction
    case 'M': // Constant that can be used as a 32-bit MOV immediate
    case 'N': // Constant that can be used as a 64-bit MOV immediate
    case 'Y': // Floating point constant zero
    case 'Z': // Integer constant zero
      return C_Other;
    case 'Q': // A memory reference with base register and no offset
      return C_Memory;
    case 'S': // A symbolic address
      return C_Other;
    }
  }

  // FIXME: Ump, Utf, Usa, Ush
  // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
  //      whatever they may be
  // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
  // Usa: An absolute symbolic address
  // Ush: The high part (bits 32:12) of a pc-relative symbolic address
  assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
         && Constraint != "Ush" && "Unimplemented constraints");

  return TargetLowering::getConstraintType(Constraint);
}

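// For example, in
//   asm("add %0, %1, %2" : "=r"(Res) : "r"(A), "I"(1234));
// the "I" operand is classified as C_Other above and then validated as an
// ADD immediate in LowerAsmOperandForConstraint below.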
TargetLowering::ConstraintWeight
AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
                                                      const char *Constraint) const {

  llvm_unreachable("Constraint weight unimplemented");
}

void
AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                    std::string &Constraint,
                                                    std::vector<SDValue> &Ops,
                                                    SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only length-1 constraints are C_Other.
  if (Constraint.size() != 1) return;

  // Only C_Other constraints get lowered like this. That means constants for
  // us, so return early if there's no hope the constraint can be lowered.

  switch (Constraint[0]) {
  default: return;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'Z': {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    uint64_t CVal = C->getZExtValue();
    uint32_t Bits;

    switch (Constraint[0]) {
    default:
      // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly.
      // 'J' is a peculiarly useless SUB constraint.
      llvm_unreachable("Unimplemented C_Other constraint");
    case 'I':
      if (CVal <= 0xfff)
        break;
      return;
    case 'K':
      if (A64Imms::isLogicalImm(32, CVal, Bits))
        break;
      return;
    case 'L':
      if (A64Imms::isLogicalImm(64, CVal, Bits))
        break;
      return;
    case 'Z':
      if (CVal == 0)
        break;
      return;
    }

    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }
  case 'S': {
    // An absolute symbolic address or label reference.
    if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                          GA->getValueType(0));
    } else if (const BlockAddressSDNode *BA =
                   dyn_cast<BlockAddressSDNode>(Op)) {
      Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                         BA->getValueType(0));
    } else if (const ExternalSymbolSDNode *ES =
                   dyn_cast<ExternalSymbolSDNode>(Op)) {
      Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                           ES->getValueType(0));
    } else
      return;
    break;
  }
  case 'Y':
    if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFP->isExactlyValue(0.0)) {
        Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
        break;
      }
    }
    return;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // It's an unknown constraint for us. Let generic code have a go.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
AArch64TargetLowering::getRegForInlineAsmConstraint(
                                                 const std::string &Constraint,
                                                 MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT.getSizeInBits() <= 32)
        return std::make_pair(0U, &AArch64::GPR32RegClass);
      else if (VT == MVT::i64)
        return std::make_pair(0U, &AArch64::GPR64RegClass);
      break;
    case 'w':
      if (VT == MVT::f16)
        return std::make_pair(0U, &AArch64::FPR16RegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &AArch64::FPR32RegClass);
      else if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, &AArch64::FPR64RegClass);
      else if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, &AArch64::FPR128RegClass);
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

/// Represent NEON load and store intrinsics as MemIntrinsicNodes.
/// The associated MachineMemOperands record the alignment specified
/// in the intrinsic calls.
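/// For example, a call to @llvm.arm.neon.vld3.v4i32 loads three v4i32 values
/// (48 bytes), so memVT below is conservatively set to v6i64
/// (48 / 8 = 6 64-bit elements).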
bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;