//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "aarch64-isel"

#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"

using namespace llvm;
static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  if (Subtarget->isTargetLinux())
    return new AArch64LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  llvm_unreachable("unknown subtarget type");
}
AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {
  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  // SIMD compares set the entire lane's bits to 1
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Scalar register <-> type mapping
  addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);
  addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
  addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
  addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
  addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v8i8, &AArch64::VPR64RegClass);
    addRegisterClass(MVT::v4i16, &AArch64::VPR64RegClass);
    addRegisterClass(MVT::v2i32, &AArch64::VPR64RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::VPR64RegClass);
    addRegisterClass(MVT::v2f32, &AArch64::VPR64RegClass);
    addRegisterClass(MVT::v16i8, &AArch64::VPR128RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::VPR128RegClass);
    addRegisterClass(MVT::v4i32, &AArch64::VPR128RegClass);
    addRegisterClass(MVT::v2i64, &AArch64::VPR128RegClass);
    addRegisterClass(MVT::v4f32, &AArch64::VPR128RegClass);
    addRegisterClass(MVT::v2f64, &AArch64::VPR128RegClass);
  }
  computeRegisterProperties();

  // We combine OR nodes for bitfield and NEON BSL operations.
  setTargetDAGCombine(ISD::OR);

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SHL);

  // AArch64 does not have i1 loads, or much of anything for i1 really.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);

  setStackPointerRegisterToSaveRestore(AArch64::XSP);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);

  // We'll lower globals to wrappers for selection.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);

  // A64 instructions have the comparison predicate attached to the user of the
  // result, but having a separate comparison is valuable for matching.
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);

  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);

  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  // Legal floating-point operations.
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f64, Legal);

  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FCEIL, MVT::f64, Legal);

  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

  setOperationAction(ISD::FNEG, MVT::f32, Legal);
  setOperationAction(ISD::FNEG, MVT::f64, Legal);

  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f64, Legal);

  setOperationAction(ISD::FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::FSQRT, MVT::f64, Legal);

  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f128, Legal);

  // Illegal floating-point operations.
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);

  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);

  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f64, Expand);

  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG, MVT::f64, Expand);

  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);

  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);

  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
  setOperationAction(ISD::FPOWI, MVT::f64, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);

  // This prevents LLVM trying to compress double constants into a floating
  // constant-pool entry and trying to load from there. It's of doubtful benefit
  // for A64: we'd need LDR followed by FCVT, I believe.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);

  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);

  setExceptionPointerRegister(AArch64::X0);
  setExceptionSelectorRegister(AArch64::X1);
  if (Subtarget->hasNEON()) {
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
  }
}
EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  // It's reasonably important that this value matches the "natural" legal
  // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
  // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
  if (!VT.isVector()) return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}
static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc, unsigned &StrOpc) {
  static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
                                       AArch64::LDXR_word, AArch64::LDXR_dword};
  static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
                                      AArch64::LDAXR_word, AArch64::LDAXR_dword};
  static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
                                        AArch64::STXR_word, AArch64::STXR_dword};
  static const unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
                                       AArch64::STLXR_word, AArch64::STLXR_dword};
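
  // Acquire orderings need the load-acquire exclusive variants; release
  // orderings need the store-release exclusive variants. Each table is
  // indexed by log2 of the access size in bytes.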
  const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");

  LdrOpc = LoadOps[Log2_32(Size)];
  StrOpc = StoreOps[Log2_32(Size)];
}
MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *TRC
    = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
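  // When BinOpcode is 0 (ATOMIC_SWAP) there is no arithmetic to perform, so
  // the incoming value is stored back to memory directly.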

  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   ldxr dest, ptr
  //   <binop> scratch, dest, incr
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
  if (BinOpcode) {
    // All arithmetic operations we'll be creating are designed to take an extra
    // shift or extend operand, which we can conveniently set to zero.

    // Operand order needs to go the other way for NAND.
    if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(incr).addReg(dest).addImm(0);
    else
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(dest).addReg(incr).addImm(0);
  }

  // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}
MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              unsigned CmpOp,
                                              A64CC::CondCodes Cond) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());

  unsigned oldval = dest;
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  unsigned scratch = MRI.createVirtualRegister(TRC);
  MRI.constrainRegClass(scratch, TRCsp);

  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   ldxr dest, ptr
  //   cmp incr, dest (, sign extend if necessary)
  //   csel scratch, dest, incr, cond
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // Build compare and cmov instructions.
  MRI.constrainRegClass(incr, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(incr).addReg(oldval).addImm(0);

  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
          scratch)
    .addReg(oldval).addReg(incr).addImm(Cond);

  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
    .addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}
MachineBasicBlock *
AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const {
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned oldval = MI->getOperand(2).getReg();
  unsigned newval = MI->getOperand(3).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRCsp;
  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  //   fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  //  loop1MBB:
  //   ldxr dest, [ptr]
  //   cmp dest, oldval
  //   b.ne exitMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
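  // The flag-setting compare below wants its operand in the register class
  // that also allows the stack pointer (GPR32wsp/GPR64xsp), so constrain dest
  // accordingly before using it there.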
  MRI.constrainRegClass(dest, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(dest).addReg(oldval).addImm(0);
  BuildMI(BB, dl, TII->get(AArch64::Bcc))
    .addImm(A64CC::NE).addMBB(exitMBB);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  //  loop2MBB:
  //   strex stxr_status, newval, [ptr]
  //   cbnz stxr_status, loop1MBB
  BB = loop2MBB;
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loop1MBB);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}
MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
  //     str qIFFALSE, [sp]
  //     b.cc TrueBB
  //     b EndBB
  // TrueBB:
  //     str qIFTRUE, [sp]
  // EndBB:
  //     ldr qDEST, [sp]
  //
  // Using virtual registers would probably not be beneficial since COPY
  // instructions are expensive for f128 (there's no actual instruction to
  // implement the copy).
  //
  // An alternative would be to do an integer-CSEL on some address. E.g.:
  //     csel x0, x0, x1, ne
  //
  // It's unclear which approach is actually optimal.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction::iterator It = MBB;
  ++It;

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned IfTrueReg = MI->getOperand(1).getReg();
  unsigned IfFalseReg = MI->getOperand(2).getReg();
  unsigned CondCode = MI->getOperand(3).getImm();
  bool NZCVKilled = MI->getOperand(4).isKill();
  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer rest of current basic-block to EndBB
  EndBB->splice(EndBB->begin(), MBB,
                llvm::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  // We need somewhere to store the f128 value needed.
  int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);

  // [... start of incoming MBB ...]
  // str qIFFALSE, [sp]
  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfFalseReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);
  BuildMI(MBB, DL, TII->get(AArch64::Bcc))
    .addImm(CondCode)
    .addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::Bimm))
    .addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfTrueReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
  // blocks.
  TrueBB->addSuccessor(EndBB);

  // [... rest of incoming MBB ...]

  EndBB->addLiveIn(AArch64::NZCV);
  MachineInstr *StartOfEnd = EndBB->begin();
  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  MI->eraseFromParent();
  return EndBB;
}
MachineBasicBlock *
AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unhandled instruction with custom inserter");
  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, MBB);
  case AArch64::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);

  case AArch64::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);

  case AArch64::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);

  case AArch64::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);

  case AArch64::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);

  case AArch64::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);

  case AArch64::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);

  case AArch64::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);

  case AArch64::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);

  case AArch64::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);

  case AArch64::ATOMIC_SWAP_I8:
    return emitAtomicBinary(MI, MBB, 1, 0);
  case AArch64::ATOMIC_SWAP_I16:
    return emitAtomicBinary(MI, MBB, 2, 0);
  case AArch64::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, MBB, 4, 0);
  case AArch64::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, MBB, 8, 0);

  case AArch64::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwap(MI, MBB, 1);
  case AArch64::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwap(MI, MBB, 2);
  case AArch64::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, MBB, 4);
  case AArch64::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, MBB, 8);
  }
}
const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case AArch64ISD::BR_CC:          return "AArch64ISD::BR_CC";
  case AArch64ISD::Call:           return "AArch64ISD::Call";
  case AArch64ISD::FPMOV:          return "AArch64ISD::FPMOV";
  case AArch64ISD::GOTLoad:        return "AArch64ISD::GOTLoad";
  case AArch64ISD::BFI:            return "AArch64ISD::BFI";
  case AArch64ISD::EXTR:           return "AArch64ISD::EXTR";
  case AArch64ISD::Ret:            return "AArch64ISD::Ret";
  case AArch64ISD::SBFX:           return "AArch64ISD::SBFX";
  case AArch64ISD::SELECT_CC:      return "AArch64ISD::SELECT_CC";
  case AArch64ISD::SETCC:          return "AArch64ISD::SETCC";
  case AArch64ISD::TC_RETURN:      return "AArch64ISD::TC_RETURN";
  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
  case AArch64ISD::TLSDESCCALL:    return "AArch64ISD::TLSDESCCALL";
  case AArch64ISD::WrapperLarge:   return "AArch64ISD::WrapperLarge";
  case AArch64ISD::WrapperSmall:   return "AArch64ISD::WrapperSmall";

  case AArch64ISD::NEON_BSL:
    return "AArch64ISD::NEON_BSL";
  case AArch64ISD::NEON_MOVIMM:
    return "AArch64ISD::NEON_MOVIMM";
  case AArch64ISD::NEON_MVNIMM:
    return "AArch64ISD::NEON_MVNIMM";
  case AArch64ISD::NEON_FMOVIMM:
    return "AArch64ISD::NEON_FMOVIMM";
  case AArch64ISD::NEON_CMP:
    return "AArch64ISD::NEON_CMP";
  case AArch64ISD::NEON_CMPZ:
    return "AArch64ISD::NEON_CMPZ";
  case AArch64ISD::NEON_TST:
    return "AArch64ISD::NEON_TST";

  default: return NULL;
  }
}
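
// Registers used for argument passing under the AArch64 Procedure Call
// Standard: x0-x7 for general-purpose arguments, q0-q7 for FP/SIMD arguments.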
static const uint16_t AArch64FPRArgRegs[] = {
  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
};
static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);

static const uint16_t AArch64ArgRegs[] = {
  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
};
static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);

static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Mark all remaining general purpose registers as allocated. We don't
  // backtrack: if (for example) an i128 gets put on the stack, no subsequent
  // i64 will go in registers (C.11).
  for (unsigned i = 0; i < NumArgRegs; ++i)
    State.AllocateReg(AArch64ArgRegs[i]);

  return false;
}
#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
  switch (CC) {
  default: llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return CC_A64_APCS;
  }
}
void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                           SDLoc DL, SDValue &Chain) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  SmallVector<SDValue, 8> MemOps;
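
  // Any argument registers left unused by the named arguments must be spilled
  // to the register-save areas so that va_arg can find them later.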
  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
                                                         NumArgRegs);
  unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
                                                         NumFPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);

    SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());

    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                   MachinePointerInfo::getStack(i * 8),
                                   false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }
  }
  unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
  int FPRIdx = 0;
  if (FPRSaveSize != 0) {
    FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);

    SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());

    for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
                                   &AArch64::FPR128RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                   MachinePointerInfo::getStack(i * 16),
                                   false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                        DAG.getConstant(16, getPointerTy()));
    }
  }

  int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);

  FuncInfo->setVariadicStackIdx(StackIdx);
  FuncInfo->setVariadicGPRIdx(GPRIdx);
  FuncInfo->setVariadicGPRSize(GPRSaveSize);
  FuncInfo->setVariadicFPRIdx(FPRIdx);
  FuncInfo->setVariadicFPRSize(FPRSaveSize);

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                        MemOps.size());
  }
}
SDValue
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));

  SmallVector<SDValue, 16> ArgValues;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;

    if (Flags.isByVal()) {
      // Byval is used for small structs and HFAs in the PCS, but the system
      // should work in a non-compliant manner for larger structs.
      EVT PtrTy = getPointerTy();
      int Size = Flags.getByValSize();
      unsigned NumRegs = (Size + 7) / 8;

      unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
                                                 VA.getLocMemOffset(),
                                                 false);
      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
      InVals.push_back(FrameIdxN);

      continue;
    } else if (VA.isRegLoc()) {
      MVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);

      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
    } else { // VA.isMemLoc()
      assert(VA.isMemLoc());

      int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                      VA.getLocMemOffset(), true);

      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned DestSize = VA.getValVT().getSizeInBits();
      unsigned DestSubReg;

      switch (DestSize) {
      case 8: DestSubReg = AArch64::sub_8; break;
      case 16: DestSubReg = AArch64::sub_16; break;
      case 32: DestSubReg = AArch64::sub_32; break;
      case 64: DestSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                   VA.getValVT(), ArgValue,
                                   DAG.getTargetConstant(DestSubReg, MVT::i32)),
                         0);
      break;
    }
    }

    InVals.push_back(ArgValue);
  }
  if (isVarArg)
    SaveVarArgRegisters(CCInfo, DAG, dl, Chain);

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes in
    // any case:
    StackArgSize = RoundUpToAlignment(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
    // a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackArgSize);

    // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
  }

  // Even if we're not expected to free up the space, it's useful to know how
  // much is there while considering tail calls (because we can reuse it).
  FuncInfo->setBytesInStackArgArea(StackArgSize);

  return Chain;
}
SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // PCS: "If the type, T, of the result of a function is such that
    // void func(T arg) would require that arg be passed as a value in a
    // register (or set of registers) according to the rules in 5.4, then the
    // result is returned in the same registers as would be used for such an
    // argument."
    //
    // "Otherwise, the caller shall reserve a block of memory of sufficient
    // size and alignment to hold the result. The address of the memory block
    // shall be passed as an additional argument to the function in x8."
    //
    // This is implemented in two places. The register-return values are dealt
    // with here, more complex returns are passed as an sret parameter, which
    // means we don't have to worry about it during actual return.
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");

    SDValue Arg = OutVals[i];

    // There's no convenient note in the ABI about this as there is for normal
    // arguments, but it says return values are passed in the same registers as
    // an argument would be. I believe that includes the comments about
    // unspecified higher bits, putting the burden of widening on the *caller*
    // for return values.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
      // Floating-point values should only be extended when they're going into
      // memory, which can't happen here so an integer extend is acceptable.
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}
SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
  bool IsSibCall = false;

  if (IsTailCall) {
    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                    Outs, OutVals, Ins, DAG);

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));

  // On AArch64 (and all other architectures I'm aware of) the most this has to
  // do is adjust the stack pointer.
  unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);

  // Since we're not changing the ABI to make this a tail call, the memory
  // operands are already available in the caller's incoming argument space.
  if (IsSibCall)
    NumBytes = 0;

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                                 dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
                                        getPointerTy());

  SmallVector<SDValue, 8> MemOpChains;
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    SDValue Arg = OutVals[i];

    // Callee does the actual widening, so all extensions just use an implicit
    // definition of the rest of the Loc. Aesthetically, this would be nicer as
    // an ANY_EXTEND, but that isn't valid for floating-point types and this
    // alternative works on integer types too.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned SrcSize = VA.getValVT().getSizeInBits();
      unsigned SrcSubReg;

      switch (SrcSize) {
      case 8: SrcSubReg = AArch64::sub_8; break;
      case 16: SrcSubReg = AArch64::sub_16; break;
      case 32: SrcSubReg = AArch64::sub_32; break;
      case 64: SrcSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                       VA.getLocVT(),
                                       DAG.getUNDEF(VA.getLocVT()),
                                       Arg,
                                       DAG.getTargetConstant(SrcSubReg, MVT::i32)),
                    0);
      break;
    }
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }
    if (VA.isRegLoc()) {
      // A normal register (sub-) argument. For now we just note it down because
      // we want to copy things into registers as late as possible to avoid
      // register-pressure (and possibly worse).
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc() && "unexpected argument location");

    SDValue DstAddr;
    MachinePointerInfo DstInfo;
    if (IsTailCall) {
      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
                                          VA.getLocVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);

      DstAddr = DAG.getFrameIndex(FI, getPointerTy());
      DstInfo = MachinePointerInfo::getFixedStack(FI);

      // Make sure any stack arguments overlapping with where we're storing are
      // loaded before this eventual operation. Otherwise they'll be clobbered.
      Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
    } else {
      SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());

      DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
      DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
    }

    if (Flags.isByVal()) {
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
      SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
                                  Flags.getByValAlign(),
                                  /*isVolatile = */ false,
                                  /*alwaysInline = */ false,
                                  DstInfo, MachinePointerInfo(0));
      MemOpChains.push_back(Cpy);
    } else {
      // Normal stack argument, put it where it's needed.
      SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
                                   false, false, 0);
      MemOpChains.push_back(Store);
    }
  }
  // The loads and stores generated above shouldn't clash with each
  // other. Combining them with this TokenFactor notes that fact for the rest of
  // the compilation.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Most of the rest of the instructions need to be glued together; we don't
  // want assignments to actual registers used by a call to be rearranged by a
  // well-meaning scheduler.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // The linker is responsible for inserting veneers when necessary to put a
  // function call destination in range, so we don't need to bother with a
  // wrapper here.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();
    Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call, however in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when sp is reset they'll be
  // in the correct location.
  if (IsTailCall && !IsSibCall) {
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(0, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  // We produce the following DAG scheme for the actual call instruction:
  //     (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
  //
  // Most arguments aren't going to be used and just keep the values live as
  // far as LLVM is concerned. It's expected to be selected as simply "bl
  // callee" (for a direct, non-tail call).
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall) {
    // Each tail call may have to adjust the stack by a different amount, so
    // this information must travel along with the operation for eventual
    // consumption by emitEpilogue.
    Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
  }

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers. This
  // is used later in codegen to constrain register-allocation.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If we needed glue, put it in as the last argument.
  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall) {
    return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
  }

  Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Now we can reclaim the stack, just as well do it before working out where
  // our return value is.
  if (!IsSibCall) {
    uint64_t CalleePopBytes
      = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;

    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(CalleePopBytes, true),
                               InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  return LowerCallResult(Chain, InFlag, CallConv,
                         IsVarArg, Ins, dl, DAG, InVals);
}
SDValue
AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                      CallingConv::ID CallConv, bool IsVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Return values that are too big to fit into registers should use an sret
    // pointer, so this can be a lot simpler than the main argument code.
    assert(VA.isRegLoc() && "Memory locations not expected for call return");

    SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                                     InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
    case CCValAssign::SExt:
    case CCValAssign::AExt:
      // Floating-point arguments only get extended/truncated if they're going
      // in memory, so using the integer operation is acceptable here.
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
bool
AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    bool IsVarArg,
                                    bool IsCalleeStructRet,
                                    bool IsCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  // For CallingConv::C this function knows whether the ABI needs
  // changing. That's not true for other conventions so they will have to opt in
  // manually.
  if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
    return false;

  const MachineFunction &MF = DAG.getMachineFunction();
  const Function *CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible (see
  // X86) but less efficient and uglier in LowerCall.
  for (Function::const_arg_iterator i = CallerF->arg_begin(),
         e = CallerF->arg_end(); i != e; ++i)
    if (i->hasByValAttr())
      return false;

  if (getTargetMachine().Options.GuaranteedTailCallOpt) {
    if (IsTailCallConvention(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Now we search for cases where we can use a tail call without changing the
  // ABI. Sibcall is used in some places (particularly gcc) to refer to this
  // position.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!IsVarArg || CalleeCC == CallingConv::C)
         && "Unexpected variadic calling convention");

  if (IsVarArg && !Outs.empty()) {
    // At least two cases here: if caller is fastcc then we can't have any
    // memory arguments (we'd be expected to clean up the stack afterwards). If
    // caller is C then we could potentially use its argument area.

    // FIXME: for now we take the most conservative of these in both cases:
    // disallow all variadic memory operands.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
                   getTargetMachine(), ArgLocs, *DAG.getContext());

    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // Nothing more to check if the callee is taking no arguments
  if (Outs.empty())
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));

  const AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  // If the stack arguments for this call would fit into our own save area then
  // the call can be made tail.
  return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
}
bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
  return CallCC == CallingConv::Fast && TailCallOpt;
}

bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
  return CallCC == CallingConv::Fast;
}
SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo *MFI,
                                                   int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);
  // Add a chain value for each relevant incoming stack argument.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
         UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;
1586 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1587 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1588 ArgChains.push_back(SDValue(L, 1));
1591 // Build a tokenfactor for all the chains.
1592 return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
1593 &ArgChains[0], ArgChains.size());
1596 static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
1598 case ISD::SETEQ: return A64CC::EQ;
1599 case ISD::SETGT: return A64CC::GT;
1600 case ISD::SETGE: return A64CC::GE;
1601 case ISD::SETLT: return A64CC::LT;
1602 case ISD::SETLE: return A64CC::LE;
1603 case ISD::SETNE: return A64CC::NE;
1604 case ISD::SETUGT: return A64CC::HI;
1605 case ISD::SETUGE: return A64CC::HS;
1606 case ISD::SETULT: return A64CC::LO;
1607 case ISD::SETULE: return A64CC::LS;
1608 default: llvm_unreachable("Unexpected condition code");
1612 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
1613 // icmp is implemented using adds/subs immediate, which take an unsigned
1614 // 12-bit immediate, optionally shifted left by 12 bits.
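//
// For example, both of these compare forms are encodable:
//   cmp w0, #0xabc           ; plain 12-bit immediate
//   cmp w0, #0xabc, lsl #12  ; 12-bit immediate shifted left by 12
// whereas a value like 0xabc0 spans both ranges and has to be materialized
// into a register first.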
// The check is symmetric for negative values, since the comparison can use
// adds (cmn) instead of subs (cmp).
if (Val < 0)
  Val = -Val;

return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
1623 SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
1624 ISD::CondCode CC, SDValue &A64cc,
1625 SelectionDAG &DAG, SDLoc &dl) const {
1626 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
1628 EVT VT = RHSC->getValueType(0);
1629 bool knownInvalid = false;
1631 // I'm not convinced the rest of LLVM handles these edge cases properly, but
1632 // we can at least get it right.
1633 if (isSignedIntSetCC(CC)) {
1634 C = RHSC->getSExtValue();
1635 } else if (RHSC->getZExtValue() > INT64_MAX) {
1636 // A 64-bit constant not representable by a signed 64-bit integer is far
1637 // too big to fit into a SUBS immediate anyway.
1638 knownInvalid = true;
1640 C = RHSC->getZExtValue();
1643 if (!knownInvalid && !isLegalICmpImmediate(C)) {
1644 // Constant does not fit, try adjusting it by one?
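// For example, (x slt 0x1001) cannot be encoded directly (0x1001 fits neither
// the plain nor the shifted 12-bit form), but it is equivalent to
// (x sle 0x1000), which can.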
1649 if (isLegalICmpImmediate(C-1)) {
1650 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
1651 RHS = DAG.getConstant(C-1, VT);
1656 if (isLegalICmpImmediate(C-1)) {
1657 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
1658 RHS = DAG.getConstant(C-1, VT);
1663 if (isLegalICmpImmediate(C+1)) {
1664 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
1665 RHS = DAG.getConstant(C+1, VT);
1670 if (isLegalICmpImmediate(C+1)) {
1671 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1672 RHS = DAG.getConstant(C+1, VT);
1679 A64CC::CondCodes CondCode = IntCCToA64CC(CC);
1680 A64cc = DAG.getConstant(CondCode, MVT::i32);
1681 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1682 DAG.getCondCode(CC));
1685 static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
1686 A64CC::CondCodes &Alternative) {
1687 A64CC::CondCodes CondCode = A64CC::Invalid;
1688 Alternative = A64CC::Invalid;
1691 default: llvm_unreachable("Unknown FP condition!");
1693 case ISD::SETOEQ: CondCode = A64CC::EQ; break;
1695 case ISD::SETOGT: CondCode = A64CC::GT; break;
1697 case ISD::SETOGE: CondCode = A64CC::GE; break;
1698 case ISD::SETOLT: CondCode = A64CC::MI; break;
1699 case ISD::SETOLE: CondCode = A64CC::LS; break;
1700 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
1701 case ISD::SETO: CondCode = A64CC::VC; break;
1702 case ISD::SETUO: CondCode = A64CC::VS; break;
1703 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
1704 case ISD::SETUGT: CondCode = A64CC::HI; break;
1705 case ISD::SETUGE: CondCode = A64CC::PL; break;
1707 case ISD::SETULT: CondCode = A64CC::LT; break;
1709 case ISD::SETULE: CondCode = A64CC::LE; break;
1711 case ISD::SETUNE: CondCode = A64CC::NE; break;
1717 AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1719 EVT PtrVT = getPointerTy();
1720 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
switch (getTargetMachine().getCodeModel()) {
1723 case CodeModel::Small:
1724 // The most efficient code is PC-relative anyway for the small memory model,
1725 // so we don't need to worry about relocation model.
1726 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
1727 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1728 AArch64II::MO_NO_FLAG),
1729 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1730 AArch64II::MO_LO12),
1731 DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
1732 case CodeModel::Large:
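// The large memory model materializes the absolute address with a MOVZ/MOVK
// sequence, one 16-bit chunk per relocation:
//   movz x0, #:abs_g3:target
//   movk x0, #:abs_g2_nc:target
//   movk x0, #:abs_g1_nc:target
//   movk x0, #:abs_g0_nc:target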
1734 AArch64ISD::WrapperLarge, DL, PtrVT,
1735 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
1736 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
1737 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
1738 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
1740 llvm_unreachable("Only small and large code models supported now");
1745 // (BRCOND chain, val, dest)
1747 AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
1749 SDValue Chain = Op.getOperand(0);
1750 SDValue TheBit = Op.getOperand(1);
1751 SDValue DestBB = Op.getOperand(2);
1753 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
// that as the consumer we are responsible for ignoring rubbish in higher
// bits.
1756 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
1757 DAG.getConstant(1, MVT::i32));
1759 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
1760 DAG.getConstant(0, TheBit.getValueType()),
1761 DAG.getCondCode(ISD::SETNE));
1763 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
1764 A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
1768 // (BR_CC chain, condcode, lhs, rhs, dest)
1770 AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
1772 SDValue Chain = Op.getOperand(0);
1773 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1774 SDValue LHS = Op.getOperand(2);
1775 SDValue RHS = Op.getOperand(3);
1776 SDValue DestBB = Op.getOperand(4);
1778 if (LHS.getValueType() == MVT::f128) {
1779 // f128 comparisons are lowered to runtime calls by a routine which sets
1780 // LHS, RHS and CC appropriately for the rest of this function to continue.
1781 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
1783 // If softenSetCCOperands returned a scalar, we need to compare the result
1784 // against zero to select between true and false values.
1785 if (RHS.getNode() == 0) {
1786 RHS = DAG.getConstant(0, LHS.getValueType());
1791 if (LHS.getValueType().isInteger()) {
1794 // Integers are handled in a separate function because the combinations of
1795 // immediates and tests can get hairy and we may want to fiddle things.
1796 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
1798 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1799 Chain, CmpOp, A64cc, DestBB);
1802 // Note that some LLVM floating-point CondCodes can't be lowered to a single
1803 // conditional branch, hence FPCCToA64CC can set a second test, where either
1804 // passing is sufficient.
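// For example, an ordered "not equal" (SETONE) has no single A64 condition:
// it is emitted as a branch on MI followed by a second branch on GT.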
1805 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
1806 CondCode = FPCCToA64CC(CC, Alternative);
1807 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
1808 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1809 DAG.getCondCode(CC));
1810 SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1811 Chain, SetCC, A64cc, DestBB);
1813 if (Alternative != A64CC::Invalid) {
1814 A64cc = DAG.getConstant(Alternative, MVT::i32);
1815 A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1816 A64BR_CC, SetCC, A64cc, DestBB);
1824 AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
1825 RTLIB::Libcall Call) const {
1828 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
1829 EVT ArgVT = Op.getOperand(i).getValueType();
1830 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
1831 Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy;
1832 Entry.isSExt = false;
1833 Entry.isZExt = false;
1834 Args.push_back(Entry);
1836 SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());
1838 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
1840 // By default, the input chain to this libcall is the entry node of the
1841 // function. If the libcall is going to be emitted as a tail call then
1842 // isUsedByReturnOnly will change it to the right chain if the return
1843 // node which is being folded has a non-entry input chain.
1844 SDValue InChain = DAG.getEntryNode();
1846 // isTailCall may be true since the callee does not reference caller stack
1847 // frame. Check if it's in the right position.
1848 SDValue TCChain = InChain;
1849 bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
1854 CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
1855 0, getLibcallCallingConv(Call), isTailCall,
1856 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
1857 Callee, Args, DAG, SDLoc(Op));
1858 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
1860 if (!CallInfo.second.getNode())
1861 // It's a tailcall, return the chain (which is the DAG root).
1862 return DAG.getRoot();
1864 return CallInfo.first;
1868 AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
1869 if (Op.getOperand(0).getValueType() != MVT::f128) {
1870 // It's legal except when f128 is involved
1875 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
1877 SDValue SrcVal = Op.getOperand(0);
1878 return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
1879 /*isSigned*/ false, SDLoc(Op)).first;
1883 AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
1884 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
1887 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
1889 return LowerF128ToCall(Op, DAG, LC);
1893 AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
1894 bool IsSigned) const {
1895 if (Op.getOperand(0).getValueType() != MVT::f128) {
1896 // It's legal except when f128 is involved
1902 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
1904 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
1906 return LowerF128ToCall(Op, DAG, LC);
1910 AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
1911 SelectionDAG &DAG) const {
1912 assert(getTargetMachine().getCodeModel() == CodeModel::Large);
1913 assert(getTargetMachine().getRelocationModel() == Reloc::Static);
1915 EVT PtrVT = getPointerTy();
1917 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
1918 const GlobalValue *GV = GN->getGlobal();
1920 SDValue GlobalAddr = DAG.getNode(
1921 AArch64ISD::WrapperLarge, dl, PtrVT,
1922 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
1923 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
1924 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
1925 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
1927 if (GN->getOffset() != 0)
1928 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
1929 DAG.getConstant(GN->getOffset(), PtrVT));
1935 AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
1936 SelectionDAG &DAG) const {
1937 assert(getTargetMachine().getCodeModel() == CodeModel::Small);
1939 EVT PtrVT = getPointerTy();
1941 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
1942 const GlobalValue *GV = GN->getGlobal();
1943 unsigned Alignment = GV->getAlignment();
1944 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1945 if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
1946 // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
1947 // to zero when they remain undefined. In PIC mode the GOT can take care of
1948 // this, but in absolute mode we use a constant pool load.
1950 PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
1951 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
1952 AArch64II::MO_NO_FLAG),
1953 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
1954 AArch64II::MO_LO12),
1955 DAG.getConstant(8, MVT::i32));
1956 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
1957 MachinePointerInfo::getConstantPool(),
1958 /*isVolatile=*/ false,
1959 /*isNonTemporal=*/ true,
1960 /*isInvariant=*/ true, 8);
1961 if (GN->getOffset() != 0)
1962 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
1963 DAG.getConstant(GN->getOffset(), PtrVT));
1968 if (Alignment == 0) {
1969 const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
1970 if (GVPtrTy->getElementType()->isSized()) {
1972 = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
1974 // Be conservative if we can't guess, not that it really matters:
1975 // functions and labels aren't valid for loads, and the methods used to
1976 // actually calculate an address work with any alignment.
1981 unsigned char HiFixup, LoFixup;
1982 bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);
1985 HiFixup = AArch64II::MO_GOT;
1986 LoFixup = AArch64II::MO_GOT_LO12;
1989 HiFixup = AArch64II::MO_NO_FLAG;
1990 LoFixup = AArch64II::MO_LO12;
1993 // AArch64's small model demands the following sequence:
1994 // ADRP x0, somewhere
1995 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
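// When the symbol is accessed via the GOT (UseGOT above), the low part is a
// load from the GOT slot instead:
//   ADRP x0, :got:somewhere
//   LDR  x0, [x0, #:got_lo12:somewhere]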
1996 SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
1997 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1999 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2001 DAG.getConstant(Alignment, MVT::i32));
2004 GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
2008 if (GN->getOffset() != 0)
2009 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
2010 DAG.getConstant(GN->getOffset(), PtrVT));
2016 AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
2017 SelectionDAG &DAG) const {
2018 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
2019 // we make those distinctions here.
2021 switch (getTargetMachine().getCodeModel()) {
2022 case CodeModel::Small:
2023 return LowerGlobalAddressELFSmall(Op, DAG);
2024 case CodeModel::Large:
2025 return LowerGlobalAddressELFLarge(Op, DAG);
2027 llvm_unreachable("Only small and large code models supported now");
2031 SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
2034 SelectionDAG &DAG) const {
2035 EVT PtrVT = getPointerTy();
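// The code built here corresponds roughly to the canonical TLSDESC call
// sequence:
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, #:tlsdesc_lo12:var]
//   add  x0, x0, #:tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1
// leaving the offset of "var" from TPIDR_EL0 in x0.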
2037 // The function we need to call is simply the first entry in the GOT for this
// descriptor; load it in preparation.
2039 SDValue Func, Chain;
2040 Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
// The function takes only one argument: the address of the descriptor itself
// in X0.
SDValue Glue;
2046 Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
2047 Glue = Chain.getValue(1);
2049 // Finally, there's a special calling-convention which means that the lookup
2050 // must preserve all registers (except X0, obviously).
2051 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
2052 const AArch64RegisterInfo *A64RI
2053 = static_cast<const AArch64RegisterInfo *>(TRI);
2054 const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();
2056 // We're now ready to populate the argument list, as with a normal call:
2057 std::vector<SDValue> Ops;
2058 Ops.push_back(Chain);
2059 Ops.push_back(Func);
2060 Ops.push_back(SymAddr);
2061 Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
2062 Ops.push_back(DAG.getRegisterMask(Mask));
2063 Ops.push_back(Glue);
2065 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2066 Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
2068 Glue = Chain.getValue(1);
// After the call, the offset from TPIDR_EL0 is in X0; copy it out and pass it
2071 // back to the generic handling code.
2072 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
2076 AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
2077 SelectionDAG &DAG) const {
2078 assert(getSubtarget()->isTargetELF() &&
2079 "TLS not implemented for non-ELF targets");
2080 assert(getTargetMachine().getCodeModel() == CodeModel::Small
2081 && "TLS only supported in small memory model");
2082 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2084 TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
2087 EVT PtrVT = getPointerTy();
2089 const GlobalValue *GV = GA->getGlobal();
2091 SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
2093 if (Model == TLSModel::InitialExec) {
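// Initial-exec TP-relative offsets are fixed at (dynamic) link time, so they
// are loaded from a GOT entry:
//   adrp x0, :gottprel:var
//   ldr  x0, [x0, #:gottprel_lo12:var]
// and then added to the thread pointer below.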
2094 TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2095 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2096 AArch64II::MO_GOTTPREL),
2097 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2098 AArch64II::MO_GOTTPREL_LO12),
2099 DAG.getConstant(8, MVT::i32));
2100 TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
2102 } else if (Model == TLSModel::LocalExec) {
2103 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2104 AArch64II::MO_TPREL_G1);
2105 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2106 AArch64II::MO_TPREL_G0_NC);
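// Local-exec offsets are known at static link time, so they can be built
// directly with a MOVZ/MOVK pair:
//   movz x0, #:tprel_g1:var
//   movk x0, #:tprel_g0_nc:var
// before being added to the thread pointer.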
2108 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2109 DAG.getTargetConstant(1, MVT::i32)), 0);
2110 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
2112 DAG.getTargetConstant(0, MVT::i32)), 0);
2113 } else if (Model == TLSModel::GeneralDynamic) {
2114 // Accesses used in this sequence go via the TLS descriptor which lives in
2115 // the GOT. Prepare an address we can use to handle this.
2116 SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2117 AArch64II::MO_TLSDESC);
2118 SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2119 AArch64II::MO_TLSDESC_LO12);
2120 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2122 DAG.getConstant(8, MVT::i32));
2123 SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
2125 TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2126 } else if (Model == TLSModel::LocalDynamic) {
2127 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
2128 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
// the beginning of the module's TLS region, followed by a DTPREL offset
// calculation.
2132 // These accesses will need deduplicating if there's more than one.
2133 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
2134 .getInfo<AArch64MachineFunctionInfo>();
2135 MFI->incNumLocalDynamicTLSAccesses();
2138 // Get the location of _TLS_MODULE_BASE_:
2139 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2140 AArch64II::MO_TLSDESC);
2141 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2142 AArch64II::MO_TLSDESC_LO12);
2143 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2145 DAG.getConstant(8, MVT::i32));
2146 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
2148 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2150 // Get the variable's offset from _TLS_MODULE_BASE_
2151 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2152 AArch64II::MO_DTPREL_G1);
2153 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2154 AArch64II::MO_DTPREL_G0_NC);
2156 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2157 DAG.getTargetConstant(0, MVT::i32)), 0);
2158 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
2160 DAG.getTargetConstant(0, MVT::i32)), 0);
2162 llvm_unreachable("Unsupported TLS access model");
2165 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
2169 AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2170 bool IsSigned) const {
2171 if (Op.getValueType() != MVT::f128) {
2172 // Legal for everything except f128.
2178 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2180 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2182 return LowerF128ToCall(Op, DAG, LC);
2187 AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2188 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2190 EVT PtrVT = getPointerTy();
2192 // When compiling PIC, jump tables get put in the code section so a static
2193 // relocation-style is acceptable for both cases.
2194 switch (getTargetMachine().getCodeModel()) {
2195 case CodeModel::Small:
2196 return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2197 DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
2198 DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2199 AArch64II::MO_LO12),
2200 DAG.getConstant(1, MVT::i32));
2201 case CodeModel::Large:
2203 AArch64ISD::WrapperLarge, dl, PtrVT,
2204 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
2205 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
2206 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
2207 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
2209 llvm_unreachable("Only small and large code models supported now");
2213 // (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
2215 AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
2217 SDValue LHS = Op.getOperand(0);
2218 SDValue RHS = Op.getOperand(1);
2219 SDValue IfTrue = Op.getOperand(2);
2220 SDValue IfFalse = Op.getOperand(3);
2221 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2223 if (LHS.getValueType() == MVT::f128) {
2224 // f128 comparisons are lowered to libcalls, but slot in nicely here
2226 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2228 // If softenSetCCOperands returned a scalar, we need to compare the result
2229 // against zero to select between true and false values.
2230 if (RHS.getNode() == 0) {
2231 RHS = DAG.getConstant(0, LHS.getValueType());
2236 if (LHS.getValueType().isInteger()) {
2239 // Integers are handled in a separate function because the combinations of
2240 // immediates and tests can get hairy and we may want to fiddle things.
2241 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2243 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2244 CmpOp, IfTrue, IfFalse, A64cc);
2247 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2248 // conditional branch, hence FPCCToA64CC can set a second test, where either
2249 // passing is sufficient.
2250 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2251 CondCode = FPCCToA64CC(CC, Alternative);
2252 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2253 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2254 DAG.getCondCode(CC));
2255 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
2257 SetCC, IfTrue, IfFalse, A64cc);
2259 if (Alternative != A64CC::Invalid) {
2260 A64cc = DAG.getConstant(Alternative, MVT::i32);
2261 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2262 SetCC, IfTrue, A64SELECT_CC, A64cc);
2266 return A64SELECT_CC;
2269 // (SELECT testbit, iftrue, iffalse)
2271 AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2273 SDValue TheBit = Op.getOperand(0);
2274 SDValue IfTrue = Op.getOperand(1);
2275 SDValue IfFalse = Op.getOperand(2);
2277 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
// that as the consumer we are responsible for ignoring rubbish in higher
// bits.
2280 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
2281 DAG.getConstant(1, MVT::i32));
2282 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
2283 DAG.getConstant(0, TheBit.getValueType()),
2284 DAG.getCondCode(ISD::SETNE));
2286 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2287 A64CMP, IfTrue, IfFalse,
2288 DAG.getConstant(A64CC::NE, MVT::i32));
2291 static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) {
2293 SDValue LHS = Op.getOperand(0);
2294 SDValue RHS = Op.getOperand(1);
2295 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2296 EVT VT = Op.getValueType();
2297 bool Invert = false;
2301 if (LHS.getValueType().isInteger()) {
2303 // Attempt to use Vector Integer Compare Mask Test instruction.
2304 // TST = icmp ne (and (op0, op1), zero).
2305 if (CC == ISD::SETNE) {
2306 if (((LHS.getOpcode() == ISD::AND) &&
2307 ISD::isBuildVectorAllZeros(RHS.getNode())) ||
2308 ((RHS.getOpcode() == ISD::AND) &&
2309 ISD::isBuildVectorAllZeros(LHS.getNode()))) {
2311 SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS;
2312 SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0));
2313 SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1));
2314 return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS);
2318 // Attempt to use Vector Integer Compare Mask against Zero instr (Signed).
2319 // Note: Compare against Zero does not support unsigned predicates.
2320 if ((ISD::isBuildVectorAllZeros(RHS.getNode()) ||
2321 ISD::isBuildVectorAllZeros(LHS.getNode())) &&
2322 !isUnsignedIntSetCC(CC)) {
2324 // If LHS is the zero value, swap operands and CondCode.
2325 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
2326 CC = getSetCCSwappedOperands(CC);
2331 // Ensure valid CondCode for Compare Mask against Zero instruction:
2332 // EQ, GE, GT, LE, LT.
2333 if (ISD::SETNE == CC) {
2338 // Using constant type to differentiate integer and FP compares with zero.
2339 Op1 = DAG.getConstant(0, MVT::i32);
2340 Opcode = AArch64ISD::NEON_CMPZ;
2343 // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
2344 // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
2348 llvm_unreachable("Illegal integer comparison.");
2364 CC = getSetCCSwappedOperands(CC);
2368 std::swap(LHS, RHS);
2370 Opcode = AArch64ISD::NEON_CMP;
2375 // Generate Compare Mask instr or Compare Mask against Zero instr.
2377 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
2380 NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
2385 // Now handle Floating Point cases.
2386 // Attempt to use Vector Floating Point Compare Mask against Zero instruction.
2387 if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
2388 ISD::isBuildVectorAllZeros(LHS.getNode())) {
2390 // If LHS is the zero value, swap operands and CondCode.
2391 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
2392 CC = getSetCCSwappedOperands(CC);
2397 // Using constant type to differentiate integer and FP compares with zero.
2398 Op1 = DAG.getConstantFP(0, MVT::f32);
2399 Opcode = AArch64ISD::NEON_CMPZ;
2401 // Attempt to use Vector Floating Point Compare Mask instruction.
2404 Opcode = AArch64ISD::NEON_CMP;
2408 // Some register compares have to be implemented with swapped CC and operands,
2409 // e.g.: OLT implemented as OGT with swapped operands.
2410 bool SwapIfRegArgs = false;
2412 // Ensure valid CondCode for FP Compare Mask against Zero instruction:
2413 // EQ, GE, GT, LE, LT.
2414 // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
2417 llvm_unreachable("Illegal FP comparison");
2420 Invert = true; // Fallthrough
2428 SwapIfRegArgs = true;
2437 SwapIfRegArgs = true;
2446 SwapIfRegArgs = true;
2455 SwapIfRegArgs = true;
2462 Invert = true; // Fallthrough
// Expand this to (OGT | OLT).
2466 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
2468 SwapIfRegArgs = true;
2471 Invert = true; // Fallthrough
2473 // Expand this to (OGE | OLT).
2475 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
2477 SwapIfRegArgs = true;
2481 if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
2482 CC = getSetCCSwappedOperands(CC);
2483 std::swap(Op0, Op1);
2486 // Generate FP Compare Mask instr or FP Compare Mask against Zero instr
2487 SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
2489 if (NeonCmpAlt.getNode())
2490 NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);
2493 NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
2498 // (SETCC lhs, rhs, condcode)
2500 AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2502 SDValue LHS = Op.getOperand(0);
2503 SDValue RHS = Op.getOperand(1);
2504 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2505 EVT VT = Op.getValueType();
2508 return LowerVectorSETCC(Op, DAG);
2510 if (LHS.getValueType() == MVT::f128) {
2511 // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
2512 // for the rest of the function (some i32 or i64 values).
2513 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2515 // If softenSetCCOperands returned a scalar, use it.
2516 if (RHS.getNode() == 0) {
2517 assert(LHS.getValueType() == Op.getValueType() &&
2518 "Unexpected setcc expansion!");
2523 if (LHS.getValueType().isInteger()) {
2526 // Integers are handled in a separate function because the combinations of
2527 // immediates and tests can get hairy and we may want to fiddle things.
2528 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2530 return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2531 CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
2535 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2536 // conditional branch, hence FPCCToA64CC can set a second test, where either
2537 // passing is sufficient.
2538 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2539 CondCode = FPCCToA64CC(CC, Alternative);
2540 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2541 SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2542 DAG.getCondCode(CC));
2543 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2544 CmpOp, DAG.getConstant(1, VT),
2545 DAG.getConstant(0, VT), A64cc);
2547 if (Alternative != A64CC::Invalid) {
2548 A64cc = DAG.getConstant(Alternative, MVT::i32);
2549 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
2550 DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
2553 return A64SELECT_CC;
2557 AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
2558 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2561 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
2562 // rather than just 8.
2563 return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
2564 Op.getOperand(1), Op.getOperand(2),
2565 DAG.getConstant(32, MVT::i32), 8, false, false,
2566 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
2570 AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2571 // The layout of the va_list struct is specified in the AArch64 Procedure Call
2572 // Standard, section B.3.
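// Roughly, the structure being filled in is:
//   struct va_list {
//     void *__stack;   // offset 0: next stacked argument
//     void *__gr_top;  // offset 8: end of the GP register save area
//     void *__vr_top;  // offset 16: end of the FP/SIMD register save area
//     int __gr_offs;   // offset 24: negative offset from __gr_top to the
//                      //            next saved GP argument register
//     int __vr_offs;   // offset 28: same for the FP/SIMD save area
//   };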
2573 MachineFunction &MF = DAG.getMachineFunction();
2574 AArch64MachineFunctionInfo *FuncInfo
2575 = MF.getInfo<AArch64MachineFunctionInfo>();
2578 SDValue Chain = Op.getOperand(0);
2579 SDValue VAList = Op.getOperand(1);
2580 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2581 SmallVector<SDValue, 4> MemOps;
2583 // void *__stack at offset 0
2584 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
2586 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
2587 MachinePointerInfo(SV), false, false, 0));
2589 // void *__gr_top at offset 8
2590 int GPRSize = FuncInfo->getVariadicGPRSize();
2592 SDValue GRTop, GRTopAddr;
2594 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2595 DAG.getConstant(8, getPointerTy()));
2597 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
2598 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
2599 DAG.getConstant(GPRSize, getPointerTy()));
2601 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
2602 MachinePointerInfo(SV, 8),
2606 // void *__vr_top at offset 16
2607 int FPRSize = FuncInfo->getVariadicFPRSize();
2609 SDValue VRTop, VRTopAddr;
2610 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2611 DAG.getConstant(16, getPointerTy()));
2613 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
2614 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
2615 DAG.getConstant(FPRSize, getPointerTy()));
2617 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
2618 MachinePointerInfo(SV, 16),
2622 // int __gr_offs at offset 24
2623 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2624 DAG.getConstant(24, getPointerTy()));
2625 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
2626 GROffsAddr, MachinePointerInfo(SV, 24),
2629 // int __vr_offs at offset 28
2630 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2631 DAG.getConstant(28, getPointerTy()));
2632 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
2633 VROffsAddr, MachinePointerInfo(SV, 28),
2636 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
2641 AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2642 switch (Op.getOpcode()) {
2643 default: llvm_unreachable("Don't know how to custom lower this!");
2644 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
2645 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
2646 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
2647 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
2648 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
2649 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
2650 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
2651 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
2652 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
2653 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
2655 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
2656 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
2657 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
2658 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
2659 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
2660 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
2661 case ISD::SELECT: return LowerSELECT(Op, DAG);
2662 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
2663 case ISD::SETCC: return LowerSETCC(Op, DAG);
2664 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
2665 case ISD::VASTART: return LowerVASTART(Op, DAG);
2666 case ISD::BUILD_VECTOR:
2667 return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
2673 /// Check if the specified splat value corresponds to a valid vector constant
2674 /// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If
/// so, return the encoded 8-bit immediate and the OpCmode instruction field
/// values.
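/// For example, a v4i32 splat of 0x0000ab00 is representable: Imm is 0xab and
/// OpCmode selects a "LSL #8" shift, so no constant-pool load is needed.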
2677 static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
2678 unsigned SplatBitSize, SelectionDAG &DAG,
2679 bool is128Bits, NeonModImmType type, EVT &VT,
2680 unsigned &Imm, unsigned &OpCmode) {
2681 switch (SplatBitSize) {
2683 llvm_unreachable("unexpected size for isNeonModifiedImm");
2685 if (type != Neon_Mov_Imm)
2687 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
2688 // Neon movi per byte: Op=0, Cmode=1110.
2691 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
2695 // Neon move inst per halfword
2696 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
2697 if ((SplatBits & ~0xff) == 0) {
2698 // Value = 0x00nn is 0x00nn LSL 0
2699 // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
2700 // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001
2706 if ((SplatBits & ~0xff00) == 0) {
2707 // Value = 0xnn00 is 0x00nn LSL 8
2708 // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
2709 // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011
2711 Imm = SplatBits >> 8;
2715 // can't handle any other
2720 // First the LSL variants (MSL is unusable by some interested instructions).
2722 // Neon move instr per word, shift zeros
2723 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
2724 if ((SplatBits & ~0xff) == 0) {
2725 // Value = 0x000000nn is 0x000000nn LSL 0
2726 // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
2727 // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001
2733 if ((SplatBits & ~0xff00) == 0) {
2734 // Value = 0x0000nn00 is 0x000000nn LSL 8
2735 // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
2736 // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011
2738 Imm = SplatBits >> 8;
2742 if ((SplatBits & ~0xff0000) == 0) {
2743 // Value = 0x00nn0000 is 0x000000nn LSL 16
2744 // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
2745 // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101
2747 Imm = SplatBits >> 16;
2751 if ((SplatBits & ~0xff000000) == 0) {
2752 // Value = 0xnn000000 is 0x000000nn LSL 24
2753 // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
2754 // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111
2756 Imm = SplatBits >> 24;
2761 // Now the MSL immediates.
2763 // Neon move instr per word, shift ones
2764 if ((SplatBits & ~0xffff) == 0 &&
2765 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
2766 // Value = 0x0000nnff is 0x000000nn MSL 8
2767 // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
2769 Imm = SplatBits >> 8;
2773 if ((SplatBits & ~0xffffff) == 0 &&
2774 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
2775 // Value = 0x00nnffff is 0x000000nn MSL 16
// movi: Op=0, Cmode= 1101; mvni: Op=1, Cmode= 1101
2778 Imm = SplatBits >> 16;
2782 // can't handle any other
2787 if (type != Neon_Mov_Imm)
2789 // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
2790 // movi Op=1, Cmode=1110.
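// For example, the 64-bit splat 0x00ff00ff00ff00ff is encoded as Imm=0x55:
// one immediate bit per all-ones byte.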
2792 uint64_t BitMask = 0xff;
2794 unsigned ImmMask = 1;
2796 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
2797 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
2800 } else if ((SplatBits & BitMask) != 0) {
2807 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
2815 static SDValue PerformANDCombine(SDNode *N,
2816 TargetLowering::DAGCombinerInfo &DCI) {
2818 SelectionDAG &DAG = DCI.DAG;
2820 EVT VT = N->getValueType(0);
// We're looking for an SRL/AND pair which forms a UBFX.
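//
// For example, with i32 values:
//   (and (srl X, 4), 0xff)
// becomes (UBFX X, 4, 11), i.e. "ubfx wD, wX, #4, #8": an unsigned extract of
// the 8 bits starting at bit 4.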
2824 if (VT != MVT::i32 && VT != MVT::i64)
2827 if (!isa<ConstantSDNode>(N->getOperand(1)))
2830 uint64_t TruncMask = N->getConstantOperandVal(1);
2831 if (!isMask_64(TruncMask))
2834 uint64_t Width = CountPopulation_64(TruncMask);
2835 SDValue Shift = N->getOperand(0);
2837 if (Shift.getOpcode() != ISD::SRL)
2840 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
2842 uint64_t LSB = Shift->getConstantOperandVal(1);
2844 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
2847 return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
2848 DAG.getConstant(LSB, MVT::i64),
2849 DAG.getConstant(LSB + Width - 1, MVT::i64));
2852 /// For a true bitfield insert, the bits getting into that contiguous mask
2853 /// should come from the low part of an existing value: they must be formed from
2854 /// a compatible SHL operation (unless they're already low). This function
2855 /// checks that condition and returns the least-significant bit that's
/// intended. If the operation is not a field preparation, -1 is returned.
2857 static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
2858 SDValue &MaskedVal, uint64_t Mask) {
2859 if (!isShiftedMask_64(Mask))
2862 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
2863 // instruction. BFI will do a left-shift by LSB before applying the mask we've
2864 // spotted, so in general we should pre-emptively "undo" that by making sure
2865 // the incoming bits have had a right-shift applied to them.
2867 // This right shift, however, will combine with existing left/right shifts. In
2868 // the simplest case of a completely straight bitfield operation, it will be
2869 // expected to completely cancel out with an existing SHL. More complicated
// cases (e.g. bitfield to bitfield copy) may still need a real shift before
// the BFI.
2873 uint64_t LSB = countTrailingZeros(Mask);
2874 int64_t ShiftRightRequired = LSB;
2875 if (MaskedVal.getOpcode() == ISD::SHL &&
2876 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
2877 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
2878 MaskedVal = MaskedVal.getOperand(0);
2879 } else if (MaskedVal.getOpcode() == ISD::SRL &&
2880 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
2881 ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
2882 MaskedVal = MaskedVal.getOperand(0);
2885 if (ShiftRightRequired > 0)
2886 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
2887 DAG.getConstant(ShiftRightRequired, MVT::i64));
2888 else if (ShiftRightRequired < 0) {
2889 // We could actually end up with a residual left shift, for example with
2890 // "struc.bitfield = val << 1".
2891 MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
2892 DAG.getConstant(-ShiftRightRequired, MVT::i64));
2898 /// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
2899 /// a mask and an extension. Returns true if a BFI was found and provides
2900 /// information on its surroundings.
2901 static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
2904 if (N.getOpcode() == ISD::ZERO_EXTEND) {
2906 N = N.getOperand(0);
2909 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
2910 Mask = N->getConstantOperandVal(1);
2911 N = N.getOperand(0);
2913 // Mask is the whole width.
2914 Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
2917 if (N.getOpcode() == AArch64ISD::BFI) {
2925 /// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
2926 /// is roughly equivalent to (and (BFI ...), mask). This form is used because it
2927 /// can often be further combined with a larger mask. Ultimately, we want mask
2928 /// to be 2^32-1 or 2^64-1 so the AND can be skipped.
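///
/// For example, with i32 values:
///   (or (and X, 0xffff0fff), (and (shl Y, 12), 0xf000))
/// becomes (BFI X, Y, 12, 4); since the two masks together cover the whole
/// register, no residual AND is needed.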
2929 static SDValue tryCombineToBFI(SDNode *N,
2930 TargetLowering::DAGCombinerInfo &DCI,
2931 const AArch64Subtarget *Subtarget) {
2932 SelectionDAG &DAG = DCI.DAG;
2934 EVT VT = N->getValueType(0);
2936 assert(N->getOpcode() == ISD::OR && "Unexpected root");
2938 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
2939 // abandon the effort.
2940 SDValue LHS = N->getOperand(0);
2941 if (LHS.getOpcode() != ISD::AND)
2945 if (isa<ConstantSDNode>(LHS.getOperand(1)))
2946 LHSMask = LHS->getConstantOperandVal(1);
2950 // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
2951 // is or abandon the effort.
2952 SDValue RHS = N->getOperand(1);
2953 if (RHS.getOpcode() != ISD::AND)
2957 if (isa<ConstantSDNode>(RHS.getOperand(1)))
2958 RHSMask = RHS->getConstantOperandVal(1);
2962 // Can't do anything if the masks are incompatible.
2963 if (LHSMask & RHSMask)
2966 // Now we need one of the masks to be a contiguous field. Without loss of
2967 // generality that should be the RHS one.
2968 SDValue Bitfield = LHS.getOperand(0);
2969 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
// We know that LHS is a candidate new value, and RHS isn't already a better
// one.
2972 std::swap(LHS, RHS);
2973 std::swap(LHSMask, RHSMask);
2976 // We've done our best to put the right operands in the right places, all we
2977 // can do now is check whether a BFI exists.
2978 Bitfield = RHS.getOperand(0);
2979 int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
2983 uint32_t Width = CountPopulation_64(RHSMask);
2984 assert(Width && "Expected non-zero bitfield width");
2986 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
2987 LHS.getOperand(0), Bitfield,
2988 DAG.getConstant(LSB, MVT::i64),
2989 DAG.getConstant(Width, MVT::i64));
2992 if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
2995 return DAG.getNode(ISD::AND, DL, VT, BFI,
2996 DAG.getConstant(LHSMask | RHSMask, VT));
2999 /// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
3000 /// original input. This is surprisingly common because SROA splits things up
3001 /// into i8 chunks, so the originally detected MaskedBFI may actually only act
3002 /// on the low (say) byte of a word. This is then orred into the rest of the
3003 /// word afterwards.
3005 /// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
3007 /// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
/// MaskedBFI. We can also deal with a certain amount of extend/truncate being
/// involved.
3010 static SDValue tryCombineToLargerBFI(SDNode *N,
3011 TargetLowering::DAGCombinerInfo &DCI,
3012 const AArch64Subtarget *Subtarget) {
3013 SelectionDAG &DAG = DCI.DAG;
3015 EVT VT = N->getValueType(0);
3017 // First job is to hunt for a MaskedBFI on either the left or right. Swap
3018 // operands if it's actually on the right.
3020 SDValue PossExtraMask;
3021 uint64_t ExistingMask = 0;
3022 bool Extended = false;
3023 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
3024 PossExtraMask = N->getOperand(1);
3025 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
3026 PossExtraMask = N->getOperand(0);
3030 // We can only combine a BFI with another compatible mask.
3031 if (PossExtraMask.getOpcode() != ISD::AND ||
3032 !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
3035 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
3037 // Masks must be compatible.
3038 if (ExtraMask & ExistingMask)
3041 SDValue OldBFIVal = BFI.getOperand(0);
3042 SDValue NewBFIVal = BFI.getOperand(1);
3044 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
3045 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
3046 // need to be made compatible.
3047 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
3048 && "Invalid types for BFI");
3049 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
3050 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
3053 // We need the MaskedBFI to be combined with a mask of the *same* value.
3054 if (PossExtraMask.getOperand(0) != OldBFIVal)
3057 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3058 OldBFIVal, NewBFIVal,
3059 BFI.getOperand(2), BFI.getOperand(3));
3061 // If the masking is trivial, we don't need to create it.
3062 if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
3065 return DAG.getNode(ISD::AND, DL, VT, BFI,
3066 DAG.getConstant(ExtraMask | ExistingMask, VT));
3069 /// An EXTR instruction is made up of two shifts, ORed together. This helper
3070 /// searches for and classifies those shifts.
3071 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
3073 if (N.getOpcode() == ISD::SHL)
3075 else if (N.getOpcode() == ISD::SRL)
3080 if (!isa<ConstantSDNode>(N.getOperand(1)))
3083 ShiftAmount = N->getConstantOperandVal(1);
3084 Src = N->getOperand(0);
3088 /// EXTR instruction extracts a contiguous chunk of bits from two existing
3089 /// registers viewed as a high/low pair. This function looks for the pattern:
3090 /// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
/// EXTR. Can't quite be done in TableGen because the two immediates aren't
/// independent.
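///
/// For example, on i32:
///   (or (shl X, #8), (srl Y, #24))
/// becomes "extr wD, wX, wY, #24", i.e. bits [55:24] of the 64-bit
/// concatenation X:Y.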
3093 static SDValue tryCombineToEXTR(SDNode *N,
3094 TargetLowering::DAGCombinerInfo &DCI) {
3095 SelectionDAG &DAG = DCI.DAG;
3097 EVT VT = N->getValueType(0);
3099 assert(N->getOpcode() == ISD::OR && "Unexpected root");
3101 if (VT != MVT::i32 && VT != MVT::i64)
3105 uint32_t ShiftLHS = 0;
3107 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
3111 uint32_t ShiftRHS = 0;
3113 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
3116 // If they're both trying to come from the high part of the register, they're
3117 // not really an EXTR.
3118 if (LHSFromHi == RHSFromHi)
3121 if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
3125 std::swap(LHS, RHS);
3126 std::swap(ShiftLHS, ShiftRHS);
3129 return DAG.getNode(AArch64ISD::EXTR, DL, VT,
3131 DAG.getConstant(ShiftRHS, MVT::i64));
3134 /// Target-specific dag combine xforms for ISD::OR
3135 static SDValue PerformORCombine(SDNode *N,
3136 TargetLowering::DAGCombinerInfo &DCI,
3137 const AArch64Subtarget *Subtarget) {
3139 SelectionDAG &DAG = DCI.DAG;
3141 EVT VT = N->getValueType(0);
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3146 // Attempt to recognise bitfield-insert operations.
3147 SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
// Attempt to combine an existing MaskedBFI operation into one with a larger
// mask.
3153 Res = tryCombineToLargerBFI(N, DCI, Subtarget);
3157 Res = tryCombineToEXTR(N, DCI);
3161 if (!Subtarget->hasNEON())
3164 // Attempt to use vector immediate-form BSL
3165 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
3167 SDValue N0 = N->getOperand(0);
3168 if (N0.getOpcode() != ISD::AND)
3171 SDValue N1 = N->getOperand(1);
3172 if (N1.getOpcode() != ISD::AND)
3175 if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
3177 unsigned SplatBitSize;
3179 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
3181 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
3184 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
3186 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
3188 !HasAnyUndefs && SplatBits0 == ~SplatBits1) {
3189 // Canonicalize the vector type to make instruction selection simpler.
3190 EVT CanonicalVT = VT.is128BitVector() ? MVT::v16i8 : MVT::v8i8;
3191 SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT,
3192 N0->getOperand(1), N0->getOperand(0),
3194 return DAG.getNode(ISD::BITCAST, DL, VT, Result);
3202 /// Target-specific dag combine xforms for ISD::SRA
3203 static SDValue PerformSRACombine(SDNode *N,
3204 TargetLowering::DAGCombinerInfo &DCI) {
3206 SelectionDAG &DAG = DCI.DAG;
3208 EVT VT = N->getValueType(0);
3210 // We're looking for an SRA/SHL pair which form an SBFX.
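//
// For example, with i32 values:
//   (sra (shl X, 24), 28)
// sign-extends the 4-bit field at bits [7:4] of X, so it becomes
// (SBFX X, 4, 7), i.e. "sbfx wD, wX, #4, #4".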
3212 if (VT != MVT::i32 && VT != MVT::i64)
3215 if (!isa<ConstantSDNode>(N->getOperand(1)))
3218 uint64_t ExtraSignBits = N->getConstantOperandVal(1);
3219 SDValue Shift = N->getOperand(0);
3221 if (Shift.getOpcode() != ISD::SHL)
3224 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
3227 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
3228 uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
3229 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;
3231 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
3234 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
3235 DAG.getConstant(LSB, MVT::i64),
3236 DAG.getConstant(LSB + Width - 1, MVT::i64));
3239 /// Check if this is a valid build_vector for the immediate operand of
3240 /// a vector shift operation, where all the elements of the build_vector
3241 /// must have the same constant integer value.
3242 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
3243 // Ignore bit_converts.
3244 while (Op.getOpcode() == ISD::BITCAST)
3245 Op = Op.getOperand(0);
3246 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3247 APInt SplatBits, SplatUndef;
3248 unsigned SplatBitSize;
3250 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
3251 HasAnyUndefs, ElementBits) ||
3252 SplatBitSize > ElementBits)
3254 Cnt = SplatBits.getSExtValue();
3258 /// Check if this is a valid build_vector for the immediate operand of
3259 /// a vector shift left operation. That value must be in the range:
3260 /// 0 <= Value < ElementBits for a left shift
3261 static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
3262 assert(VT.isVector() && "vector shift count is not a vector type");
3263 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3264 if (!getVShiftImm(Op, ElementBits, Cnt))
3266 return (Cnt >= 0 && Cnt < ElementBits);
3269 static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
3270 const AArch64Subtarget *ST) {
3271 SelectionDAG &DAG = DCI.DAG;
3272 EVT VT = N->getValueType(0);
3274 // Nothing to be done for scalar shifts.
3275 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3276 if (!VT.isVector() || !TLI.isTypeLegal(VT))
3279 assert(ST->hasNEON() && "unexpected vector shift");
3281 if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
3282 SDValue RHS = DAG.getNode(AArch64ISD::NEON_DUPIMM, SDLoc(N->getOperand(0)),
3283 VT, DAG.getConstant(Cnt, MVT::i32));
3284 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
3291 AArch64TargetLowering::PerformDAGCombine(SDNode *N,
3292 DAGCombinerInfo &DCI) const {
3293 switch (N->getOpcode()) {
3295 case ISD::AND: return PerformANDCombine(N, DCI);
3296 case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
3297 case ISD::SRA: return PerformSRACombine(N, DCI);
3298 case ISD::SHL: return PerformSHLCombine(N, DCI, getSubtarget());
3304 AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3305 VT = VT.getScalarType();
3310 switch (VT.getSimpleVT().SimpleTy) {
3324 // If this is a case we can't handle, return null and let the default
3325 // expansion code take care of it.
3327 AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
3328 const AArch64Subtarget *ST) const {
3330 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
3332 EVT VT = Op.getValueType();
3334 APInt SplatBits, SplatUndef;
3335 unsigned SplatBitSize;
3338 // Note we favor lowering MOVI over MVNI.
3339 // This has implications on the definition of patterns in TableGen to select
3340 // BIC immediate instructions but not ORR immediate instructions.
3341 // If this lowering order is changed, TableGen patterns for BIC immediate and
3342 // ORR immediate instructions have to be updated.
3343 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
3344 if (SplatBitSize <= 64) {
3345 // First attempt to use vector immediate-form MOVI
3348 unsigned OpCmode = 0;
3350 if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
3351 SplatBitSize, DAG, VT.is128BitVector(),
3352 Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
3353 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
3354 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
3356 if (ImmVal.getNode() && OpCmodeVal.getNode()) {
3357 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
3358 ImmVal, OpCmodeVal);
3359 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
3363 // Then attempt to use vector immediate-form MVNI
3364 uint64_t NegatedImm = (~SplatBits).getZExtValue();
3365 if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
3366 DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
3368 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
3369 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
3370 if (ImmVal.getNode() && OpCmodeVal.getNode()) {
3371 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
3372 ImmVal, OpCmodeVal);
3373 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
3377 // Attempt to use vector immediate-form FMOV
3378 if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
3379 (VT == MVT::v2f64 && SplatBitSize == 64)) {
3381 SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
3384 if (A64Imms::isFPImm(RealVal, ImmVal)) {
3385 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
3386 return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
3394 AArch64TargetLowering::ConstraintType
3395 AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
3396 if (Constraint.size() == 1) {
3397 switch (Constraint[0]) {
3399 case 'w': // An FP/SIMD vector register
3400 return C_RegisterClass;
3401 case 'I': // Constant that can be used with an ADD instruction
3402 case 'J': // Constant that can be used with a SUB instruction
3403 case 'K': // Constant that can be used with a 32-bit logical instruction
3404 case 'L': // Constant that can be used with a 64-bit logical instruction
3405 case 'M': // Constant that can be used as a 32-bit MOV immediate
3406 case 'N': // Constant that can be used as a 64-bit MOV immediate
3407 case 'Y': // Floating point constant zero
3408 case 'Z': // Integer constant zero
3410 case 'Q': // A memory reference with base register and no offset
3412 case 'S': // A symbolic address
3417 // FIXME: Ump, Utf, Usa, Ush
3418 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
3419 // whatever they may be
3420 // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
3421 // Usa: An absolute symbolic address
3422 // Ush: The high part (bits 32:12) of a pc-relative symbolic address
3423 assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
3424 && Constraint != "Ush" && "Unimplemented constraints");
3426 return TargetLowering::getConstraintType(Constraint);
3429 TargetLowering::ConstraintWeight
3430 AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
3431 const char *Constraint) const {
3433 llvm_unreachable("Constraint weight unimplemented");
3437 AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3438 std::string &Constraint,
3439 std::vector<SDValue> &Ops,
3440 SelectionDAG &DAG) const {
3441 SDValue Result(0, 0);
3443 // Only length 1 constraints are C_Other.
3444 if (Constraint.size() != 1) return;
3446 // Only C_Other constraints get lowered like this. That means constants for us
3447 // so return early if there's no hope the constraint can be lowered.
switch (Constraint[0]) {
3451 case 'I': case 'J': case 'K': case 'L':
3452 case 'M': case 'N': case 'Z': {
3453 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
3457 uint64_t CVal = C->getZExtValue();
3460 switch (Constraint[0]) {
3462 // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J'
3463 // is a peculiarly useless SUB constraint.
3464 llvm_unreachable("Unimplemented C_Other constraint");
3470 if (A64Imms::isLogicalImm(32, CVal, Bits))
3474 if (A64Imms::isLogicalImm(64, CVal, Bits))
3483 Result = DAG.getTargetConstant(CVal, Op.getValueType());
3487 // An absolute symbolic address or label reference.
3488 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
3489 Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
3490 GA->getValueType(0));
3491 } else if (const BlockAddressSDNode *BA
3492 = dyn_cast<BlockAddressSDNode>(Op)) {
3493 Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
3494 BA->getValueType(0));
3495 } else if (const ExternalSymbolSDNode *ES
3496 = dyn_cast<ExternalSymbolSDNode>(Op)) {
3497 Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
3498 ES->getValueType(0));
3504 if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
3505 if (CFP->isExactlyValue(0.0)) {
3506 Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
3513 if (Result.getNode()) {
3514 Ops.push_back(Result);
3518 // It's an unknown constraint for us. Let generic code have a go.
3519 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3522 std::pair<unsigned, const TargetRegisterClass*>
3523 AArch64TargetLowering::getRegForInlineAsmConstraint(
3524 const std::string &Constraint,
3526 if (Constraint.size() == 1) {
3527 switch (Constraint[0]) {
3529 if (VT.getSizeInBits() <= 32)
3530 return std::make_pair(0U, &AArch64::GPR32RegClass);
3531 else if (VT == MVT::i64)
3532 return std::make_pair(0U, &AArch64::GPR64RegClass);
3536 return std::make_pair(0U, &AArch64::FPR16RegClass);
3537 else if (VT == MVT::f32)
3538 return std::make_pair(0U, &AArch64::FPR32RegClass);
3539 else if (VT == MVT::f64)
3540 return std::make_pair(0U, &AArch64::FPR64RegClass);
3541 else if (VT.getSizeInBits() == 64)
3542 return std::make_pair(0U, &AArch64::VPR64RegClass);
3543 else if (VT == MVT::f128)
3544 return std::make_pair(0U, &AArch64::FPR128RegClass);
3545 else if (VT.getSizeInBits() == 128)
3546 return std::make_pair(0U, &AArch64::VPR128RegClass);
3551 // Use the default implementation in TargetLowering to convert the register
3552 // constraint into a member of a register class.
3553 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);