1 //===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interfaces that AArch64 uses to lower LLVM code into a selection DAG.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "aarch64-isel"
17 #include "AArch64ISelLowering.h"
18 #include "AArch64MachineFunctionInfo.h"
19 #include "AArch64TargetMachine.h"
20 #include "AArch64TargetObjectFile.h"
21 #include "Utils/AArch64BaseInfo.h"
22 #include "llvm/CodeGen/Analysis.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/IR/CallingConv.h"
32 static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
33 const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
35 if (Subtarget->isTargetLinux())
36 return new AArch64LinuxTargetObjectFile();
37 if (Subtarget->isTargetELF())
38 return new TargetLoweringObjectFileELF();
39 llvm_unreachable("unknown subtarget type");
43 AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
44 : TargetLowering(TM, createTLOF(TM)),
45 Subtarget(&TM.getSubtarget<AArch64Subtarget>()),
46 RegInfo(TM.getRegisterInfo()),
47 Itins(TM.getInstrItineraryData()) {
49 // SIMD compares set the entire lane's bits to 1
50 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
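// For illustration: a NEON compare such as FCMEQ on a v4i32 vector writes
// all-ones (0xFFFFFFFF) into each lane where the comparison holds and zero
// elsewhere, which is exactly ZeroOrNegativeOneBooleanContent.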
52 // Scalar register <-> type mapping
53 addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
54 addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);
55 addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
56 addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
57 addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
58 addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
60 computeRegisterProperties();
62 // Some atomic operations can be folded into load-acquire or store-release
63 // instructions on AArch64. It's marginally simpler to let LLVM expand
64 // everything out to a barrier and then recombine the (few) barriers we can.
65 setInsertFencesForAtomic(true);
66 setTargetDAGCombine(ISD::ATOMIC_FENCE);
67 setTargetDAGCombine(ISD::ATOMIC_STORE);
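// A rough sketch of the intent (not the exact DAG combine): an IR store such
// as "store atomic i32 %v, i32* %p seq_cst, align 4" is first expanded into
// barrier + plain store + barrier, and the combine then tries to fold the
// store/fence pair back into a single store-release (STLR) where the ordering
// permits, and likewise loads into load-acquire (LDAR).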
69 // We combine OR nodes for bitfield and NEON BSL operations.
70 setTargetDAGCombine(ISD::OR);
72 setTargetDAGCombine(ISD::AND);
73 setTargetDAGCombine(ISD::SRA);
75 // AArch64 does not have i1 loads, or much of anything for i1 really.
76 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
77 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
78 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
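// With Promote, an extending load of i1 is implemented as a byte load plus
// whatever masking or sign-extension is needed to produce a clean 0/1
// (or 0/-1) value.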
80 setStackPointerRegisterToSaveRestore(AArch64::XSP);
81 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
82 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
83 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
85 // We'll lower globals to wrappers for selection.
86 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
87 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
89 // A64 instructions have the comparison predicate attached to the user of the
90 // result, but having a separate comparison is valuable for matching.
91 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
92 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
93 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
94 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
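// For illustration, (br_cc setlt, w0, w1, %bb) is intended to end up as
// "subs wzr, w0, w1" (i.e. cmp) followed by "b.lt %bb": the flag-setting
// comparison is one node, and the predicate lives on the branch that uses it.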
96 setOperationAction(ISD::SELECT, MVT::i32, Custom);
97 setOperationAction(ISD::SELECT, MVT::i64, Custom);
98 setOperationAction(ISD::SELECT, MVT::f32, Custom);
99 setOperationAction(ISD::SELECT, MVT::f64, Custom);
101 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
102 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
103 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
104 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
106 setOperationAction(ISD::BRCOND, MVT::Other, Custom);
108 setOperationAction(ISD::SETCC, MVT::i32, Custom);
109 setOperationAction(ISD::SETCC, MVT::i64, Custom);
110 setOperationAction(ISD::SETCC, MVT::f32, Custom);
111 setOperationAction(ISD::SETCC, MVT::f64, Custom);
113 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
114 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
115 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
117 setOperationAction(ISD::VASTART, MVT::Other, Custom);
118 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
119 setOperationAction(ISD::VAEND, MVT::Other, Expand);
120 setOperationAction(ISD::VAARG, MVT::Other, Expand);
122 setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
124 setOperationAction(ISD::ROTL, MVT::i32, Expand);
125 setOperationAction(ISD::ROTL, MVT::i64, Expand);
127 setOperationAction(ISD::UREM, MVT::i32, Expand);
128 setOperationAction(ISD::UREM, MVT::i64, Expand);
129 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
130 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
132 setOperationAction(ISD::SREM, MVT::i32, Expand);
133 setOperationAction(ISD::SREM, MVT::i64, Expand);
134 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
135 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
137 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
138 setOperationAction(ISD::CTPOP, MVT::i64, Expand);
140 // Legal floating-point operations.
141 setOperationAction(ISD::FABS, MVT::f32, Legal);
142 setOperationAction(ISD::FABS, MVT::f64, Legal);
144 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
145 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
147 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
148 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
150 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
151 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
153 setOperationAction(ISD::FNEG, MVT::f32, Legal);
154 setOperationAction(ISD::FNEG, MVT::f64, Legal);
156 setOperationAction(ISD::FRINT, MVT::f32, Legal);
157 setOperationAction(ISD::FRINT, MVT::f64, Legal);
159 setOperationAction(ISD::FSQRT, MVT::f32, Legal);
160 setOperationAction(ISD::FSQRT, MVT::f64, Legal);
162 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
163 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
165 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
166 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
167 setOperationAction(ISD::ConstantFP, MVT::f128, Legal);
169 // Illegal floating-point operations.
170 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
171 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
173 setOperationAction(ISD::FCOS, MVT::f32, Expand);
174 setOperationAction(ISD::FCOS, MVT::f64, Expand);
176 setOperationAction(ISD::FEXP, MVT::f32, Expand);
177 setOperationAction(ISD::FEXP, MVT::f64, Expand);
179 setOperationAction(ISD::FEXP2, MVT::f32, Expand);
180 setOperationAction(ISD::FEXP2, MVT::f64, Expand);
182 setOperationAction(ISD::FLOG, MVT::f32, Expand);
183 setOperationAction(ISD::FLOG, MVT::f64, Expand);
185 setOperationAction(ISD::FLOG2, MVT::f32, Expand);
186 setOperationAction(ISD::FLOG2, MVT::f64, Expand);
188 setOperationAction(ISD::FLOG10, MVT::f32, Expand);
189 setOperationAction(ISD::FLOG10, MVT::f64, Expand);
191 setOperationAction(ISD::FPOW, MVT::f32, Expand);
192 setOperationAction(ISD::FPOW, MVT::f64, Expand);
194 setOperationAction(ISD::FPOWI, MVT::f32, Expand);
195 setOperationAction(ISD::FPOWI, MVT::f64, Expand);
197 setOperationAction(ISD::FREM, MVT::f32, Expand);
198 setOperationAction(ISD::FREM, MVT::f64, Expand);
200 setOperationAction(ISD::FSIN, MVT::f32, Expand);
201 setOperationAction(ISD::FSIN, MVT::f64, Expand);
204 // Virtually no operation on f128 is legal, but LLVM can't expand them when
205 // there's a valid register class, so we need custom operations in most cases.
206 setOperationAction(ISD::FABS, MVT::f128, Expand);
207 setOperationAction(ISD::FADD, MVT::f128, Custom);
208 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
209 setOperationAction(ISD::FCOS, MVT::f128, Expand);
210 setOperationAction(ISD::FDIV, MVT::f128, Custom);
211 setOperationAction(ISD::FMA, MVT::f128, Expand);
212 setOperationAction(ISD::FMUL, MVT::f128, Custom);
213 setOperationAction(ISD::FNEG, MVT::f128, Expand);
214 setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
215 setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
216 setOperationAction(ISD::FPOW, MVT::f128, Expand);
217 setOperationAction(ISD::FREM, MVT::f128, Expand);
218 setOperationAction(ISD::FRINT, MVT::f128, Expand);
219 setOperationAction(ISD::FSIN, MVT::f128, Expand);
220 setOperationAction(ISD::FSQRT, MVT::f128, Expand);
221 setOperationAction(ISD::FSUB, MVT::f128, Custom);
222 setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
223 setOperationAction(ISD::SETCC, MVT::f128, Custom);
224 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
225 setOperationAction(ISD::SELECT, MVT::f128, Expand);
226 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
227 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
229 // Lowering for many of the conversions is actually specified by the non-f128
230 // type. The LowerXXX function will be trivial when f128 isn't involved.
231 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
232 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
233 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
234 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
235 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
236 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
237 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
238 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
239 setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
240 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
241 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
242 setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
243 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
244 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
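// As an example of the split described above: FP_TO_SINT with an f128 source
// has no hardware equivalent and becomes a libcall (e.g. __fixtfsi), whereas
// the same node with an f32 or f64 source selects directly to FCVTZS.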
246 // This prevents LLVM trying to compress double constants into a floating
247 // constant-pool entry and trying to load from there. It's of doubtful benefit
248 // for A64: we'd need LDR followed by FCVT, I believe.
249 setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
250 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
251 setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
253 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
254 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
255 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
256 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
257 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
258 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
260 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
261 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
263 setExceptionPointerRegister(AArch64::X0);
264 setExceptionSelectorRegister(AArch64::X1);
267 EVT AArch64TargetLowering::getSetCCResultType(EVT VT) const {
268 // It's reasonably important that this value matches the "natural" legal
269 // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
270 // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
271 if (!VT.isVector()) return MVT::i32;
272 return VT.changeVectorElementTypeToInteger();
275 static void getExclusiveOperation(unsigned Size, unsigned &ldrOpc,
278 default: llvm_unreachable("unsupported size for atomic binary op!");
280 ldrOpc = AArch64::LDXR_byte;
281 strOpc = AArch64::STXR_byte;
284 ldrOpc = AArch64::LDXR_hword;
285 strOpc = AArch64::STXR_hword;
288 ldrOpc = AArch64::LDXR_word;
289 strOpc = AArch64::STXR_word;
292 ldrOpc = AArch64::LDXR_dword;
293 strOpc = AArch64::STXR_dword;
299 AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
301 unsigned BinOpcode) const {
302 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
303 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
305 const BasicBlock *LLVM_BB = BB->getBasicBlock();
306 MachineFunction *MF = BB->getParent();
307 MachineFunction::iterator It = BB;
310 unsigned dest = MI->getOperand(0).getReg();
311 unsigned ptr = MI->getOperand(1).getReg();
312 unsigned incr = MI->getOperand(2).getReg();
313 DebugLoc dl = MI->getDebugLoc();
315 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
317 unsigned ldrOpc, strOpc;
318 getExclusiveOperation(Size, ldrOpc, strOpc);
320 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
321 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
322 MF->insert(It, loopMBB);
323 MF->insert(It, exitMBB);
325 // Transfer the remainder of BB and its successor edges to exitMBB.
326 exitMBB->splice(exitMBB->begin(), BB,
327 llvm::next(MachineBasicBlock::iterator(MI)),
329 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
331 const TargetRegisterClass *TRC
332 = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
333 unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
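// For ATOMIC_SWAP (BinOpcode == 0) there is no arithmetic to perform: the
// incoming value is stored back as-is, so it doubles as the scratch value and
// no extra virtual register is needed.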
337 // fallthrough --> loopMBB
338 BB->addSuccessor(loopMBB);
342 // <binop> scratch, dest, incr
343 // stxr stxr_status, scratch, ptr
344 // cbnz stxr_status, loopMBB
345 // fallthrough --> exitMBB
347 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
349 // All arithmetic operations we'll be creating are designed to take an extra
350 // shift or extend operand, which we can conveniently set to zero.
352 // Operand order needs to go the other way for NAND.
353 if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
354 BuildMI(BB, dl, TII->get(BinOpcode), scratch)
355 .addReg(incr).addReg(dest).addImm(0);
357 BuildMI(BB, dl, TII->get(BinOpcode), scratch)
358 .addReg(dest).addReg(incr).addImm(0);
361 // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
362 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
363 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
365 BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
366 BuildMI(BB, dl, TII->get(AArch64::CBNZw))
367 .addReg(stxr_status).addMBB(loopMBB);
369 BB->addSuccessor(loopMBB);
370 BB->addSuccessor(exitMBB);
376 MI->eraseFromParent(); // The instruction is gone now.
382 AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
383 MachineBasicBlock *BB,
386 A64CC::CondCodes Cond) const {
387 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
389 const BasicBlock *LLVM_BB = BB->getBasicBlock();
390 MachineFunction *MF = BB->getParent();
391 MachineFunction::iterator It = BB;
394 unsigned dest = MI->getOperand(0).getReg();
395 unsigned ptr = MI->getOperand(1).getReg();
396 unsigned incr = MI->getOperand(2).getReg();
397 unsigned oldval = dest;
398 DebugLoc dl = MI->getDebugLoc();
400 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
401 const TargetRegisterClass *TRC, *TRCsp;
403 TRC = &AArch64::GPR64RegClass;
404 TRCsp = &AArch64::GPR64xspRegClass;
406 TRC = &AArch64::GPR32RegClass;
407 TRCsp = &AArch64::GPR32wspRegClass;
410 unsigned ldrOpc, strOpc;
411 getExclusiveOperation(Size, ldrOpc, strOpc);
413 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
414 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
415 MF->insert(It, loopMBB);
416 MF->insert(It, exitMBB);
418 // Transfer the remainder of BB and its successor edges to exitMBB.
419 exitMBB->splice(exitMBB->begin(), BB,
420 llvm::next(MachineBasicBlock::iterator(MI)),
422 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
424 unsigned scratch = MRI.createVirtualRegister(TRC);
425 MRI.constrainRegClass(scratch, TRCsp);
429 // fallthrough --> loopMBB
430 BB->addSuccessor(loopMBB);
434 // cmp incr, dest (, sign extend if necessary)
435 // csel scratch, dest, incr, cond
436 // stxr stxr_status, scratch, ptr
437 // cbnz stxr_status, loopMBB
438 // fallthrough --> exitMBB
440 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
442 // Build the compare and conditional-select (CSEL) instructions.
443 MRI.constrainRegClass(incr, TRCsp);
444 BuildMI(BB, dl, TII->get(CmpOp))
445 .addReg(incr).addReg(oldval).addImm(0);
447 BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
449 .addReg(oldval).addReg(incr).addImm(Cond);
451 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
452 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
454 BuildMI(BB, dl, TII->get(strOpc), stxr_status)
455 .addReg(scratch).addReg(ptr);
456 BuildMI(BB, dl, TII->get(AArch64::CBNZw))
457 .addReg(stxr_status).addMBB(loopMBB);
459 BB->addSuccessor(loopMBB);
460 BB->addSuccessor(exitMBB);
466 MI->eraseFromParent(); // The instruction is gone now.
472 AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
473 MachineBasicBlock *BB,
474 unsigned Size) const {
475 unsigned dest = MI->getOperand(0).getReg();
476 unsigned ptr = MI->getOperand(1).getReg();
477 unsigned oldval = MI->getOperand(2).getReg();
478 unsigned newval = MI->getOperand(3).getReg();
479 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
480 DebugLoc dl = MI->getDebugLoc();
482 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
483 const TargetRegisterClass *TRCsp;
484 TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;
486 unsigned ldrOpc, strOpc;
487 getExclusiveOperation(Size, ldrOpc, strOpc);
489 MachineFunction *MF = BB->getParent();
490 const BasicBlock *LLVM_BB = BB->getBasicBlock();
491 MachineFunction::iterator It = BB;
492 ++It; // insert the new blocks after the current block
494 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
495 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
496 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
497 MF->insert(It, loop1MBB);
498 MF->insert(It, loop2MBB);
499 MF->insert(It, exitMBB);
501 // Transfer the remainder of BB and its successor edges to exitMBB.
502 exitMBB->splice(exitMBB->begin(), BB,
503 llvm::next(MachineBasicBlock::iterator(MI)),
505 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
509 // fallthrough --> loop1MBB
510 BB->addSuccessor(loop1MBB);
517 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
519 unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
520 MRI.constrainRegClass(dest, TRCsp);
521 BuildMI(BB, dl, TII->get(CmpOp))
522 .addReg(dest).addReg(oldval).addImm(0);
523 BuildMI(BB, dl, TII->get(AArch64::Bcc))
524 .addImm(A64CC::NE).addMBB(exitMBB);
525 BB->addSuccessor(loop2MBB);
526 BB->addSuccessor(exitMBB);
529 // stxr stxr_status, newval, [ptr]
530 // cbnz stxr_status, loop1MBB
532 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
533 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
535 BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
536 BuildMI(BB, dl, TII->get(AArch64::CBNZw))
537 .addReg(stxr_status).addMBB(loop1MBB);
538 BB->addSuccessor(loop1MBB);
539 BB->addSuccessor(exitMBB);
545 MI->eraseFromParent(); // The instruction is gone now.
551 AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
552 MachineBasicBlock *MBB) const {
553 // We materialise the F128CSEL pseudo-instruction using conditional branches
554 // and loads, giving an instruction sequence like:
563 // Using virtual registers would probably not be beneficial since COPY
564 // instructions are expensive for f128 (there's no actual instruction to
567 // An alternative would be to do an integer-CSEL on some address. E.g.:
572 // csel x0, x0, x1, ne
575 // It's unclear which approach is actually optimal.
576 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
577 MachineFunction *MF = MBB->getParent();
578 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
579 DebugLoc DL = MI->getDebugLoc();
580 MachineFunction::iterator It = MBB;
583 unsigned DestReg = MI->getOperand(0).getReg();
584 unsigned IfTrueReg = MI->getOperand(1).getReg();
585 unsigned IfFalseReg = MI->getOperand(2).getReg();
586 unsigned CondCode = MI->getOperand(3).getImm();
587 bool NZCVKilled = MI->getOperand(4).isKill();
589 MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
590 MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
591 MF->insert(It, TrueBB);
592 MF->insert(It, EndBB);
594 // Transfer rest of current basic-block to EndBB
595 EndBB->splice(EndBB->begin(), MBB,
596 llvm::next(MachineBasicBlock::iterator(MI)),
598 EndBB->transferSuccessorsAndUpdatePHIs(MBB);
600 // We need a scratch stack slot to hold the selected f128 value.
601 int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);
603 // [... start of incoming MBB ...]
604 // str qIFFALSE, [sp]
607 BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
609 .addFrameIndex(ScratchFI)
611 BuildMI(MBB, DL, TII->get(AArch64::Bcc))
614 BuildMI(MBB, DL, TII->get(AArch64::Bimm))
616 MBB->addSuccessor(TrueBB);
617 MBB->addSuccessor(EndBB);
621 BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
623 .addFrameIndex(ScratchFI)
626 // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the blocks.
628 TrueBB->addSuccessor(EndBB);
632 // [... rest of incoming MBB ...]
634 EndBB->addLiveIn(AArch64::NZCV);
635 MachineInstr *StartOfEnd = EndBB->begin();
636 BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
637 .addFrameIndex(ScratchFI)
640 MI->eraseFromParent();
645 AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
646 MachineBasicBlock *MBB) const {
647 switch (MI->getOpcode()) {
648 default: llvm_unreachable("Unhandled instruction with custom inserter");
649 case AArch64::F128CSEL:
650 return EmitF128CSEL(MI, MBB);
651 case AArch64::ATOMIC_LOAD_ADD_I8:
652 return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
653 case AArch64::ATOMIC_LOAD_ADD_I16:
654 return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
655 case AArch64::ATOMIC_LOAD_ADD_I32:
656 return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
657 case AArch64::ATOMIC_LOAD_ADD_I64:
658 return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);
660 case AArch64::ATOMIC_LOAD_SUB_I8:
661 return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
662 case AArch64::ATOMIC_LOAD_SUB_I16:
663 return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
664 case AArch64::ATOMIC_LOAD_SUB_I32:
665 return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
666 case AArch64::ATOMIC_LOAD_SUB_I64:
667 return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);
669 case AArch64::ATOMIC_LOAD_AND_I8:
670 return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
671 case AArch64::ATOMIC_LOAD_AND_I16:
672 return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
673 case AArch64::ATOMIC_LOAD_AND_I32:
674 return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
675 case AArch64::ATOMIC_LOAD_AND_I64:
676 return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);
678 case AArch64::ATOMIC_LOAD_OR_I8:
679 return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
680 case AArch64::ATOMIC_LOAD_OR_I16:
681 return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
682 case AArch64::ATOMIC_LOAD_OR_I32:
683 return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
684 case AArch64::ATOMIC_LOAD_OR_I64:
685 return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);
687 case AArch64::ATOMIC_LOAD_XOR_I8:
688 return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
689 case AArch64::ATOMIC_LOAD_XOR_I16:
690 return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
691 case AArch64::ATOMIC_LOAD_XOR_I32:
692 return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
693 case AArch64::ATOMIC_LOAD_XOR_I64:
694 return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);
696 case AArch64::ATOMIC_LOAD_NAND_I8:
697 return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
698 case AArch64::ATOMIC_LOAD_NAND_I16:
699 return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
700 case AArch64::ATOMIC_LOAD_NAND_I32:
701 return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
702 case AArch64::ATOMIC_LOAD_NAND_I64:
703 return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);
705 case AArch64::ATOMIC_LOAD_MIN_I8:
706 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
707 case AArch64::ATOMIC_LOAD_MIN_I16:
708 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
709 case AArch64::ATOMIC_LOAD_MIN_I32:
710 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
711 case AArch64::ATOMIC_LOAD_MIN_I64:
712 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);
714 case AArch64::ATOMIC_LOAD_MAX_I8:
715 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
716 case AArch64::ATOMIC_LOAD_MAX_I16:
717 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
718 case AArch64::ATOMIC_LOAD_MAX_I32:
719 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
720 case AArch64::ATOMIC_LOAD_MAX_I64:
721 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);
723 case AArch64::ATOMIC_LOAD_UMIN_I8:
724 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
725 case AArch64::ATOMIC_LOAD_UMIN_I16:
726 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
727 case AArch64::ATOMIC_LOAD_UMIN_I32:
728 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
729 case AArch64::ATOMIC_LOAD_UMIN_I64:
730 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);
732 case AArch64::ATOMIC_LOAD_UMAX_I8:
733 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
734 case AArch64::ATOMIC_LOAD_UMAX_I16:
735 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
736 case AArch64::ATOMIC_LOAD_UMAX_I32:
737 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
738 case AArch64::ATOMIC_LOAD_UMAX_I64:
739 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);
741 case AArch64::ATOMIC_SWAP_I8:
742 return emitAtomicBinary(MI, MBB, 1, 0);
743 case AArch64::ATOMIC_SWAP_I16:
744 return emitAtomicBinary(MI, MBB, 2, 0);
745 case AArch64::ATOMIC_SWAP_I32:
746 return emitAtomicBinary(MI, MBB, 4, 0);
747 case AArch64::ATOMIC_SWAP_I64:
748 return emitAtomicBinary(MI, MBB, 8, 0);
750 case AArch64::ATOMIC_CMP_SWAP_I8:
751 return emitAtomicCmpSwap(MI, MBB, 1);
752 case AArch64::ATOMIC_CMP_SWAP_I16:
753 return emitAtomicCmpSwap(MI, MBB, 2);
754 case AArch64::ATOMIC_CMP_SWAP_I32:
755 return emitAtomicCmpSwap(MI, MBB, 4);
756 case AArch64::ATOMIC_CMP_SWAP_I64:
757 return emitAtomicCmpSwap(MI, MBB, 8);
762 const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
764 case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC";
765 case AArch64ISD::Call: return "AArch64ISD::Call";
766 case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV";
767 case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad";
768 case AArch64ISD::BFI: return "AArch64ISD::BFI";
769 case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
770 case AArch64ISD::Ret: return "AArch64ISD::Ret";
771 case AArch64ISD::SBFX: return "AArch64ISD::SBFX";
772 case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC";
773 case AArch64ISD::SETCC: return "AArch64ISD::SETCC";
774 case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
775 case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
776 case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL";
777 case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";
779 default: return NULL;
783 static const uint16_t AArch64FPRArgRegs[] = {
784 AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
785 AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
787 static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);
789 static const uint16_t AArch64ArgRegs[] = {
790 AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
791 AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
793 static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);
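// Under the AAPCS64, the first eight integer arguments are passed in x0-x7
// and the first eight FP/SIMD arguments in q0-q7; anything beyond that goes
// on the stack, which is what the register tables above encode.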
795 static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
796 CCValAssign::LocInfo LocInfo,
797 ISD::ArgFlagsTy ArgFlags, CCState &State) {
798 // Mark all remaining general purpose registers as allocated. We don't
799 // backtrack: if (for example) an i128 gets put on the stack, no subsequent
800 // i64 will go in registers (C.11).
801 for (unsigned i = 0; i < NumArgRegs; ++i)
802 State.AllocateReg(AArch64ArgRegs[i]);
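// This helper matches the CCCustomFn signature and is presumably referenced
// from the TableGen'd calling convention (AArch64GenCallingConv.inc, included
// below) once an argument has been forced onto the stack.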
807 #include "AArch64GenCallingConv.inc"
809 CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
812 default: llvm_unreachable("Unsupported calling convention");
813 case CallingConv::Fast:
820 AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
821 DebugLoc DL, SDValue &Chain) const {
822 MachineFunction &MF = DAG.getMachineFunction();
823 MachineFrameInfo *MFI = MF.getFrameInfo();
824 AArch64MachineFunctionInfo *FuncInfo
825 = MF.getInfo<AArch64MachineFunctionInfo>();
827 SmallVector<SDValue, 8> MemOps;
829 unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
831 unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
834 unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
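// Worked example: if three GPRs were consumed by named arguments then
// FirstVariadicGPR is 3, x3-x7 still need saving, and GPRSaveSize is
// 5 * 8 = 40 bytes.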
836 if (GPRSaveSize != 0) {
837 GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);
839 SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());
841 for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
842 unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
843 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
844 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
845 MachinePointerInfo::getStack(i * 8),
847 MemOps.push_back(Store);
848 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
849 DAG.getConstant(8, getPointerTy()));
853 unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
855 if (FPRSaveSize != 0) {
856 FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);
858 SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());
860 for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
861 unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
862 &AArch64::FPR128RegClass);
863 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
864 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
865 MachinePointerInfo::getStack(i * 16),
867 MemOps.push_back(Store);
868 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
869 DAG.getConstant(16, getPointerTy()));
873 int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);
875 FuncInfo->setVariadicStackIdx(StackIdx);
876 FuncInfo->setVariadicGPRIdx(GPRIdx);
877 FuncInfo->setVariadicGPRSize(GPRSaveSize);
878 FuncInfo->setVariadicFPRIdx(FPRIdx);
879 FuncInfo->setVariadicFPRSize(FPRSaveSize);
881 if (!MemOps.empty()) {
882 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
889 AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
890 CallingConv::ID CallConv, bool isVarArg,
891 const SmallVectorImpl<ISD::InputArg> &Ins,
892 DebugLoc dl, SelectionDAG &DAG,
893 SmallVectorImpl<SDValue> &InVals) const {
894 MachineFunction &MF = DAG.getMachineFunction();
895 AArch64MachineFunctionInfo *FuncInfo
896 = MF.getInfo<AArch64MachineFunctionInfo>();
897 MachineFrameInfo *MFI = MF.getFrameInfo();
898 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
900 SmallVector<CCValAssign, 16> ArgLocs;
901 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
902 getTargetMachine(), ArgLocs, *DAG.getContext());
903 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));
905 SmallVector<SDValue, 16> ArgValues;
908 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
909 CCValAssign &VA = ArgLocs[i];
910 ISD::ArgFlagsTy Flags = Ins[i].Flags;
912 if (Flags.isByVal()) {
913 // Byval is used for small structs and HFAs in the PCS, but the system
914 // should work in a non-compliant manner for larger structs.
915 EVT PtrTy = getPointerTy();
916 int Size = Flags.getByValSize();
917 unsigned NumRegs = (Size + 7) / 8;
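// e.g. a 12-byte byval struct rounds up to NumRegs == 2 eight-byte slots.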
919 unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
920 VA.getLocMemOffset(),
922 SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
923 InVals.push_back(FrameIdxN);
926 } else if (VA.isRegLoc()) {
927 MVT RegVT = VA.getLocVT();
928 const TargetRegisterClass *RC = getRegClassFor(RegVT);
929 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
931 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
932 } else { // VA.isRegLoc()
933 assert(VA.isMemLoc());
935 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
936 VA.getLocMemOffset(), true);
938 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
939 ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
940 MachinePointerInfo::getFixedStack(FI),
941 false, false, false, 0);
946 switch (VA.getLocInfo()) {
947 default: llvm_unreachable("Unknown loc info!");
948 case CCValAssign::Full: break;
949 case CCValAssign::BCvt:
950 ArgValue = DAG.getNode(ISD::BITCAST,dl, VA.getValVT(), ArgValue);
952 case CCValAssign::SExt:
953 case CCValAssign::ZExt:
954 case CCValAssign::AExt: {
955 unsigned DestSize = VA.getValVT().getSizeInBits();
959 case 8: DestSubReg = AArch64::sub_8; break;
960 case 16: DestSubReg = AArch64::sub_16; break;
961 case 32: DestSubReg = AArch64::sub_32; break;
962 case 64: DestSubReg = AArch64::sub_64; break;
963 default: llvm_unreachable("Unexpected argument promotion");
966 ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
967 VA.getValVT(), ArgValue,
968 DAG.getTargetConstant(DestSubReg, MVT::i32)),
974 InVals.push_back(ArgValue);
978 SaveVarArgRegisters(CCInfo, DAG, dl, Chain);
980 unsigned StackArgSize = CCInfo.getNextStackOffset();
981 if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
982 // This is a non-standard ABI so by fiat I say we're allowed to make full
983 // use of the stack area to be popped, which must be aligned to 16 bytes in any case:
985 StackArgSize = RoundUpToAlignment(StackArgSize, 16);
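// e.g. 24 bytes of incoming stack arguments are treated as a 32-byte area.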
987 // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
989 FuncInfo->setArgumentStackToRestore(StackArgSize);
991 // This realignment carries over to the available bytes below. Our own
992 // callers will guarantee the space is free by giving an aligned value to
995 // Even if we're not expected to free up the space, it's useful to know how
996 // much is there while considering tail calls (because we can reuse it).
997 FuncInfo->setBytesInStackArgArea(StackArgSize);
1003 AArch64TargetLowering::LowerReturn(SDValue Chain,
1004 CallingConv::ID CallConv, bool isVarArg,
1005 const SmallVectorImpl<ISD::OutputArg> &Outs,
1006 const SmallVectorImpl<SDValue> &OutVals,
1007 DebugLoc dl, SelectionDAG &DAG) const {
1008 // CCValAssign - represent the assignment of the return value to a location.
1009 SmallVector<CCValAssign, 16> RVLocs;
1011 // CCState - Info about the registers and stack slots.
1012 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1013 getTargetMachine(), RVLocs, *DAG.getContext());
1015 // Analyze outgoing return values.
1016 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));
1019 SmallVector<SDValue, 4> RetOps(1, Chain);
1021 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1022 // PCS: "If the type, T, of the result of a function is such that
1023 // void func(T arg) would require that arg be passed as a value in a
1024 // register (or set of registers) according to the rules in 5.4, then the
1025 // result is returned in the same registers as would be used for such an
1028 // Otherwise, the caller shall reserve a block of memory of sufficient
1029 // size and alignment to hold the result. The address of the memory block
1030 // shall be passed as an additional argument to the function in x8."
1032 // This is implemented in two places. The register-return values are dealt
1033 // with here, more complex returns are passed as an sret parameter, which
1034 // means we don't have to worry about it during actual return.
1035 CCValAssign &VA = RVLocs[i];
1036 assert(VA.isRegLoc() && "Only register-returns should be created by PCS");
1039 SDValue Arg = OutVals[i];
1041 // There's no convenient note in the ABI about this as there is for normal
1042 // arguments, but it says return values are passed in the same registers as
1043 // an argument would be. I believe that includes the comments about
1044 // unspecified higher bits, putting the burden of widening on the *caller*
1045 // for return values.
1046 switch (VA.getLocInfo()) {
1047 default: llvm_unreachable("Unknown loc info");
1048 case CCValAssign::Full: break;
1049 case CCValAssign::SExt:
1050 case CCValAssign::ZExt:
1051 case CCValAssign::AExt:
1052 // Floating-point values should only be extended when they're going into
1053 // memory, which can't happen here so an integer extend is acceptable.
1054 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1056 case CCValAssign::BCvt:
1057 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1061 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1062 Flag = Chain.getValue(1);
1063 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1066 RetOps[0] = Chain; // Update chain.
1068 // Add the flag if we have it.
1070 RetOps.push_back(Flag);
1072 return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
1073 &RetOps[0], RetOps.size());
1077 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
1078 SmallVectorImpl<SDValue> &InVals) const {
1079 SelectionDAG &DAG = CLI.DAG;
1080 DebugLoc &dl = CLI.DL;
1081 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
1082 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
1083 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
1084 SDValue Chain = CLI.Chain;
1085 SDValue Callee = CLI.Callee;
1086 bool &IsTailCall = CLI.IsTailCall;
1087 CallingConv::ID CallConv = CLI.CallConv;
1088 bool IsVarArg = CLI.IsVarArg;
1090 MachineFunction &MF = DAG.getMachineFunction();
1091 AArch64MachineFunctionInfo *FuncInfo
1092 = MF.getInfo<AArch64MachineFunctionInfo>();
1093 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
1094 bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
1095 bool IsSibCall = false;
1098 IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1099 IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
1100 Outs, OutVals, Ins, DAG);
1102 // A sibling call is one where we're under the usual C ABI and not planning
1103 // to change that but can still do a tail call:
1104 if (!TailCallOpt && IsTailCall)
1108 SmallVector<CCValAssign, 16> ArgLocs;
1109 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
1110 getTargetMachine(), ArgLocs, *DAG.getContext());
1111 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
1113 // On AArch64 (and all other architectures I'm aware of) the most this has to
1114 // do is adjust the stack pointer.
1115 unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
1117 // Since we're not changing the ABI to make this a tail call, the memory
1118 // operands are already available in the caller's incoming argument space.
1122 // FPDiff is the byte offset of the call's argument area from the callee's.
1123 // Stores to callee stack arguments will be placed in FixedStackSlots offset
1124 // by this amount for a tail call. In a sibling call it must be 0 because the
1125 // caller will deallocate the entire stack and the callee still expects its
1126 // arguments to begin at SP+0. Completely unused for non-tail calls.
1129 if (IsTailCall && !IsSibCall) {
1130 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1132 // FPDiff will be negative if this tail call requires more space than we
1133 // would automatically have in our incoming argument space. Positive if we
1134 // can actually shrink the stack.
1135 FPDiff = NumReusableBytes - NumBytes;
1137 // The stack pointer must be 16-byte aligned at all times it's used for a
1138 // memory operation, which in practice means at *all* times and in
1139 // particular across call boundaries. Therefore our own arguments started at
1140 // a 16-byte aligned SP and the delta applied for the tail call should
1141 // satisfy the same constraint.
1142 assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
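// Worked example: with 48 reusable bytes of incoming argument space and a
// tail call needing 32 bytes, FPDiff is +16 (the stack can shrink); if the
// call needed 64 bytes instead, FPDiff would be -16 (we need more space than
// our caller gave us).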
1146 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
1148 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
1151 SmallVector<SDValue, 8> MemOpChains;
1152 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1154 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1155 CCValAssign &VA = ArgLocs[i];
1156 ISD::ArgFlagsTy Flags = Outs[i].Flags;
1157 SDValue Arg = OutVals[i];
1159 // Callee does the actual widening, so all extensions just use an implicit
1160 // definition of the rest of the Loc. Aesthetically, this would be nicer as
1161 // an ANY_EXTEND, but that isn't valid for floating-point types and this
1162 // alternative works on integer types too.
1163 switch (VA.getLocInfo()) {
1164 default: llvm_unreachable("Unknown loc info!");
1165 case CCValAssign::Full: break;
1166 case CCValAssign::SExt:
1167 case CCValAssign::ZExt:
1168 case CCValAssign::AExt: {
1169 unsigned SrcSize = VA.getValVT().getSizeInBits();
1173 case 8: SrcSubReg = AArch64::sub_8; break;
1174 case 16: SrcSubReg = AArch64::sub_16; break;
1175 case 32: SrcSubReg = AArch64::sub_32; break;
1176 case 64: SrcSubReg = AArch64::sub_64; break;
1177 default: llvm_unreachable("Unexpected argument promotion");
1180 Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
1182 DAG.getUNDEF(VA.getLocVT()),
1184 DAG.getTargetConstant(SrcSubReg, MVT::i32)),
1189 case CCValAssign::BCvt:
1190 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1194 if (VA.isRegLoc()) {
1195 // A normal register (sub-) argument. For now we just note it down because
1196 // we want to copy things into registers as late as possible to avoid
1197 // register-pressure (and possibly worse).
1198 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1202 assert(VA.isMemLoc() && "unexpected argument location");
1205 MachinePointerInfo DstInfo;
1207 uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
1208 VA.getLocVT().getSizeInBits();
1209 OpSize = (OpSize + 7) / 8;
1210 int32_t Offset = VA.getLocMemOffset() + FPDiff;
1211 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
1213 DstAddr = DAG.getFrameIndex(FI, getPointerTy());
1214 DstInfo = MachinePointerInfo::getFixedStack(FI);
1216 // Make sure any stack arguments overlapping with where we're storing are
1217 // loaded before this eventual operation. Otherwise they'll be clobbered.
1218 Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
1220 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());
1222 DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
1223 DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
1226 if (Flags.isByVal()) {
1227 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
1228 SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
1229 Flags.getByValAlign(),
1230 /*isVolatile = */ false,
1231 /*alwaysInline = */ false,
1232 DstInfo, MachinePointerInfo(0));
1233 MemOpChains.push_back(Cpy);
1235 // Normal stack argument, put it where it's needed.
1236 SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
1238 MemOpChains.push_back(Store);
1242 // The loads and stores generated above shouldn't clash with each
1243 // other. Combining them with this TokenFactor notes that fact for the rest of the backend.
1245 if (!MemOpChains.empty())
1246 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1247 &MemOpChains[0], MemOpChains.size());
1249 // Most of the rest of the instructions need to be glued together; we don't
1250 // want assignments to actual registers used by a call to be rearranged by a
1251 // well-meaning scheduler.
1254 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1255 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1256 RegsToPass[i].second, InFlag);
1257 InFlag = Chain.getValue(1);
1260 // The linker is responsible for inserting veneers when necessary to put a
1261 // function call destination in range, so we don't need to bother with a wrapper here.
1263 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1264 const GlobalValue *GV = G->getGlobal();
1265 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
1266 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1267 const char *Sym = S->getSymbol();
1268 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
1271 // We don't usually want to end the call-sequence here because we would tidy
1272 // the frame up *after* the call, however in the ABI-changing tail-call case
1273 // we've carefully laid out the parameters so that when sp is reset they'll be
1274 // in the correct location.
1275 if (IsTailCall && !IsSibCall) {
1276 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1277 DAG.getIntPtrConstant(0, true), InFlag);
1278 InFlag = Chain.getValue(1);
1281 // We produce the following DAG scheme for the actual call instruction:
1282 // (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
1284 // Most arguments aren't going to be used and just keep the values live as
1285 // far as LLVM is concerned. It's expected to be selected as simply "bl
1286 // callee" (for a direct, non-tail call).
1287 std::vector<SDValue> Ops;
1288 Ops.push_back(Chain);
1289 Ops.push_back(Callee);
1292 // Each tail call may have to adjust the stack by a different amount, so
1293 // this information must travel along with the operation for eventual
1294 // consumption by emitEpilogue.
1295 Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
1298 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1299 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1300 RegsToPass[i].second.getValueType()));
1303 // Add a register mask operand representing the call-preserved registers. This
1304 // is used later in codegen to constrain register-allocation.
1305 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
1306 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
1307 assert(Mask && "Missing call preserved mask for calling convention");
1308 Ops.push_back(DAG.getRegisterMask(Mask));
1310 // If we needed glue, put it in as the last argument.
1311 if (InFlag.getNode())
1312 Ops.push_back(InFlag);
1314 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1317 return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
1320 Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
1321 InFlag = Chain.getValue(1);
1323 // Now we can reclaim the stack, just as well do it before working out where
1324 // our return value is.
1326 uint64_t CalleePopBytes
1327 = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;
1329 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1330 DAG.getIntPtrConstant(CalleePopBytes, true),
1332 InFlag = Chain.getValue(1);
1335 return LowerCallResult(Chain, InFlag, CallConv,
1336 IsVarArg, Ins, dl, DAG, InVals);
1340 AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
1341 CallingConv::ID CallConv, bool IsVarArg,
1342 const SmallVectorImpl<ISD::InputArg> &Ins,
1343 DebugLoc dl, SelectionDAG &DAG,
1344 SmallVectorImpl<SDValue> &InVals) const {
1345 // Assign locations to each value returned by this call.
1346 SmallVector<CCValAssign, 16> RVLocs;
1347 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
1348 getTargetMachine(), RVLocs, *DAG.getContext());
1349 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));
1351 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1352 CCValAssign VA = RVLocs[i];
1354 // Return values that are too big to fit into registers should use an sret
1355 // pointer, so this can be a lot simpler than the main argument code.
1356 assert(VA.isRegLoc() && "Memory locations not expected for call return");
1358 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1360 Chain = Val.getValue(1);
1361 InFlag = Val.getValue(2);
1363 switch (VA.getLocInfo()) {
1364 default: llvm_unreachable("Unknown loc info!");
1365 case CCValAssign::Full: break;
1366 case CCValAssign::BCvt:
1367 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
1369 case CCValAssign::ZExt:
1370 case CCValAssign::SExt:
1371 case CCValAssign::AExt:
1372 // Floating-point arguments only get extended/truncated if they're going
1373 // in memory, so using the integer operation is acceptable here.
1374 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
1378 InVals.push_back(Val);
1385 AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1386 CallingConv::ID CalleeCC,
1388 bool IsCalleeStructRet,
1389 bool IsCallerStructRet,
1390 const SmallVectorImpl<ISD::OutputArg> &Outs,
1391 const SmallVectorImpl<SDValue> &OutVals,
1392 const SmallVectorImpl<ISD::InputArg> &Ins,
1393 SelectionDAG& DAG) const {
1395 // For CallingConv::C this function knows whether the ABI needs
1396 // changing. That's not true for other conventions so they will have to opt in manually.
1398 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
1401 const MachineFunction &MF = DAG.getMachineFunction();
1402 const Function *CallerF = MF.getFunction();
1403 CallingConv::ID CallerCC = CallerF->getCallingConv();
1404 bool CCMatch = CallerCC == CalleeCC;
1406 // Byval parameters hand the function a pointer directly into the stack area
1407 // we want to reuse during a tail call. Working around this *is* possible (see
1408 // X86) but less efficient and uglier in LowerCall.
1409 for (Function::const_arg_iterator i = CallerF->arg_begin(),
1410 e = CallerF->arg_end(); i != e; ++i)
1411 if (i->hasByValAttr())
1414 if (getTargetMachine().Options.GuaranteedTailCallOpt) {
1415 if (IsTailCallConvention(CalleeCC) && CCMatch)
1420 // Now we search for cases where we can use a tail call without changing the
1421 // ABI. Sibcall is used in some places (particularly gcc) to refer to this
1424 // I want anyone implementing a new calling convention to think long and hard
1425 // about this assert.
1426 assert((!IsVarArg || CalleeCC == CallingConv::C)
1427 && "Unexpected variadic calling convention");
1429 if (IsVarArg && !Outs.empty()) {
1430 // At least two cases here: if caller is fastcc then we can't have any
1431 // memory arguments (we'd be expected to clean up the stack afterwards). If
1432 // caller is C then we could potentially use its argument area.
1434 // FIXME: for now we take the most conservative of these in both cases:
1435 // disallow all variadic memory operands.
1436 SmallVector<CCValAssign, 16> ArgLocs;
1437 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1438 getTargetMachine(), ArgLocs, *DAG.getContext());
1440 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1441 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
1442 if (!ArgLocs[i].isRegLoc())
1446 // If the calling conventions do not match, then we'd better make sure the
1447 // results are returned in the same way as what the caller expects.
1449 SmallVector<CCValAssign, 16> RVLocs1;
1450 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
1451 getTargetMachine(), RVLocs1, *DAG.getContext());
1452 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));
1454 SmallVector<CCValAssign, 16> RVLocs2;
1455 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
1456 getTargetMachine(), RVLocs2, *DAG.getContext());
1457 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));
1459 if (RVLocs1.size() != RVLocs2.size())
1461 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1462 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1464 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1466 if (RVLocs1[i].isRegLoc()) {
1467 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1470 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1476 // Nothing more to check if the callee is taking no arguments
1480 SmallVector<CCValAssign, 16> ArgLocs;
1481 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1482 getTargetMachine(), ArgLocs, *DAG.getContext());
1484 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1486 const AArch64MachineFunctionInfo *FuncInfo
1487 = MF.getInfo<AArch64MachineFunctionInfo>();
1489 // If the stack arguments for this call would fit into our own save area then
1490 // the call can be made tail.
1491 return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
1494 bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
1495 bool TailCallOpt) const {
1496 return CallCC == CallingConv::Fast && TailCallOpt;
1499 bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
1500 return CallCC == CallingConv::Fast;
1503 SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
1505 MachineFrameInfo *MFI,
1506 int ClobberedFI) const {
1507 SmallVector<SDValue, 8> ArgChains;
1508 int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
1509 int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;
1511 // Include the original chain at the beginning of the list. When this is
1512 // used by target LowerCall hooks, this helps legalize find the
1513 // CALLSEQ_BEGIN node.
1514 ArgChains.push_back(Chain);
1516 // Add a chain value for each incoming stack-argument load that overlaps the clobbered slot.
1517 for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1518 UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
1519 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
1520 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
1521 if (FI->getIndex() < 0) {
1522 int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
1523 int64_t InLastByte = InFirstByte;
1524 InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;
1526 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1527 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1528 ArgChains.push_back(SDValue(L, 1));
1531 // Build a tokenfactor for all the chains.
1532 return DAG.getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
1533 &ArgChains[0], ArgChains.size());
1536 static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
1538 case ISD::SETEQ: return A64CC::EQ;
1539 case ISD::SETGT: return A64CC::GT;
1540 case ISD::SETGE: return A64CC::GE;
1541 case ISD::SETLT: return A64CC::LT;
1542 case ISD::SETLE: return A64CC::LE;
1543 case ISD::SETNE: return A64CC::NE;
1544 case ISD::SETUGT: return A64CC::HI;
1545 case ISD::SETUGE: return A64CC::HS;
1546 case ISD::SETULT: return A64CC::LO;
1547 case ISD::SETULE: return A64CC::LS;
1548 default: llvm_unreachable("Unexpected condition code");
1552 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
1553 // icmp is implemented using adds/subs immediate, which take an unsigned
1554 // 12-bit immediate, optionally shifted left by 12 bits.
1556 // Symmetric by using adds/subs
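// e.g. 4095 (0xfff) and 0x123000 are both encodable, while 4097 (0x1001) is
// not because it has bits set both below and above the 12-bit boundary.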
1560 return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
1563 SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
1564 ISD::CondCode CC, SDValue &A64cc,
1565 SelectionDAG &DAG, DebugLoc &dl) const {
1566 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
1568 EVT VT = RHSC->getValueType(0);
1569 bool knownInvalid = false;
1571 // I'm not convinced the rest of LLVM handles these edge cases properly, but
1572 // we can at least get it right.
1573 if (isSignedIntSetCC(CC)) {
1574 C = RHSC->getSExtValue();
1575 } else if (RHSC->getZExtValue() > INT64_MAX) {
1576 // A 64-bit constant not representable by a signed 64-bit integer is far
1577 // too big to fit into a SUBS immediate anyway.
1578 knownInvalid = true;
1580 C = RHSC->getZExtValue();
1583 if (!knownInvalid && !isLegalICmpImmediate(C)) {
1584 // Constant does not fit, try adjusting it by one?
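// For example, (setlt x, 4097) can become (setle x, 4096): 4097 is not a
// legal SUBS immediate but 4096 (1 << 12) is, and for integers the two
// comparisons are equivalent.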
1589 if (isLegalICmpImmediate(C-1)) {
1590 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
1591 RHS = DAG.getConstant(C-1, VT);
1596 if (isLegalICmpImmediate(C-1)) {
1597 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
1598 RHS = DAG.getConstant(C-1, VT);
1603 if (isLegalICmpImmediate(C+1)) {
1604 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
1605 RHS = DAG.getConstant(C+1, VT);
1610 if (isLegalICmpImmediate(C+1)) {
1611 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1612 RHS = DAG.getConstant(C+1, VT);
1619 A64CC::CondCodes CondCode = IntCCToA64CC(CC);
1620 A64cc = DAG.getConstant(CondCode, MVT::i32);
1621 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1622 DAG.getCondCode(CC));
1625 static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
1626 A64CC::CondCodes &Alternative) {
1627 A64CC::CondCodes CondCode = A64CC::Invalid;
1628 Alternative = A64CC::Invalid;
1630 switch (CC) {
1631 default: llvm_unreachable("Unknown FP condition!");
1633 case ISD::SETOEQ: CondCode = A64CC::EQ; break;
1635 case ISD::SETOGT: CondCode = A64CC::GT; break;
1637 case ISD::SETOGE: CondCode = A64CC::GE; break;
1638 case ISD::SETOLT: CondCode = A64CC::MI; break;
1639 case ISD::SETOLE: CondCode = A64CC::LS; break;
1640 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
1641 case ISD::SETO: CondCode = A64CC::VC; break;
1642 case ISD::SETUO: CondCode = A64CC::VS; break;
1643 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
1644 case ISD::SETUGT: CondCode = A64CC::HI; break;
1645 case ISD::SETUGE: CondCode = A64CC::PL; break;
1647 case ISD::SETULT: CondCode = A64CC::LT; break;
1649 case ISD::SETULE: CondCode = A64CC::LE; break;
1651 case ISD::SETUNE: CondCode = A64CC::NE; break;
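// For example, SETONE (ordered and not equal) has no single A64 condition: it
// is checked as MI (ordered less than) with GT as the alternative, and either
// one passing is enough.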
1657 AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1658 DebugLoc DL = Op.getDebugLoc();
1659 EVT PtrVT = getPointerTy();
1660 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1662 assert(getTargetMachine().getCodeModel() == CodeModel::Small
1663 && "Only small code model supported at the moment");
1665 // The most efficient code is PC-relative anyway for the small memory model,
1666 // so we don't need to worry about relocation model.
1667 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
1668 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1669 AArch64II::MO_NO_FLAG),
1670 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1671 AArch64II::MO_LO12),
1672 DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
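// For the small code model this wrapper is expected to become an ADRP/ADD
// pair, roughly:
//   adrp x0, .Ltmp
//   add  x0, x0, #:lo12:.Ltmp
// (.Ltmp being the block's label).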
1676 // (BRCOND chain, val, dest)
1678 AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
1679 DebugLoc dl = Op.getDebugLoc();
1680 SDValue Chain = Op.getOperand(0);
1681 SDValue TheBit = Op.getOperand(1);
1682 SDValue DestBB = Op.getOperand(2);
1684 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
1685 // that as the consumer we are responsible for ignoring rubbish in higher
1686 // bits.
1687 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
1688 DAG.getConstant(1, MVT::i32));
1690 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
1691 DAG.getConstant(0, TheBit.getValueType()),
1692 DAG.getCondCode(ISD::SETNE));
1694 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
1695 A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
1696 DestBB);
1699 // (BR_CC chain, condcode, lhs, rhs, dest)
1701 AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
1702 DebugLoc dl = Op.getDebugLoc();
1703 SDValue Chain = Op.getOperand(0);
1704 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1705 SDValue LHS = Op.getOperand(2);
1706 SDValue RHS = Op.getOperand(3);
1707 SDValue DestBB = Op.getOperand(4);
1709 if (LHS.getValueType() == MVT::f128) {
1710 // f128 comparisons are lowered to runtime calls by a routine which sets
1711 // LHS, RHS and CC appropriately for the rest of this function to continue.
1712 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
1714 // If softenSetCCOperands returned a scalar, we need to compare the result
1715 // against zero to select between true and false values.
1716 if (RHS.getNode() == 0) {
1717 RHS = DAG.getConstant(0, LHS.getValueType());
1722 if (LHS.getValueType().isInteger()) {
1724 SDValue A64cc;
1725 // Integers are handled in a separate function because the combinations of
1726 // immediates and tests can get hairy and we may want to fiddle things.
1727 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
1729 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1730 Chain, CmpOp, A64cc, DestBB);
1733 // Note that some LLVM floating-point CondCodes can't be lowered to a single
1734 // conditional branch, hence FPCCToA64CC can set a second test, where either
1735 // passing is sufficient.
1736 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
1737 CondCode = FPCCToA64CC(CC, Alternative);
1738 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
1739 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1740 DAG.getCondCode(CC));
1741 SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1742 Chain, SetCC, A64cc, DestBB);
1744 if (Alternative != A64CC::Invalid) {
1745 A64cc = DAG.getConstant(Alternative, MVT::i32);
1746 A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1747 A64BR_CC, SetCC, A64cc, DestBB);
1755 AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
1756 RTLIB::Libcall Call) const {
1757 ArgListTy Args;
1758 ArgListEntry Entry;
1759 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
1760 EVT ArgVT = Op.getOperand(i).getValueType();
1761 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
1762 Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy;
1763 Entry.isSExt = false;
1764 Entry.isZExt = false;
1765 Args.push_back(Entry);
1767 SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());
1769 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
1771 // By default, the input chain to this libcall is the entry node of the
1772 // function. If the libcall is going to be emitted as a tail call then
1773 // isUsedByReturnOnly will change it to the right chain if the return
1774 // node which is being folded has a non-entry input chain.
1775 SDValue InChain = DAG.getEntryNode();
1777 // isTailCall may be true since the callee does not reference caller stack
1778 // frame. Check if it's in the right position.
1779 SDValue TCChain = InChain;
1780 bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
1781 if (isTailCall)
1782 InChain = TCChain;
1785 CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
1786 0, getLibcallCallingConv(Call), isTailCall,
1787 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
1788 Callee, Args, DAG, Op->getDebugLoc());
1789 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
1791 if (!CallInfo.second.getNode())
1792 // It's a tailcall, return the chain (which is the DAG root).
1793 return DAG.getRoot();
1795 return CallInfo.first;
1799 AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
1800 if (Op.getOperand(0).getValueType() != MVT::f128) {
1801 // It's legal except when f128 is involved
1802 return Op;
1803 }
1805 RTLIB::Libcall LC;
1806 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
1808 SDValue SrcVal = Op.getOperand(0);
1809 return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
1810 /*isSigned*/ false, Op.getDebugLoc());
1814 AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
1815 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
1817 RTLIB::Libcall LC;
1818 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
1820 return LowerF128ToCall(Op, DAG, LC);
1824 AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
1825 bool IsSigned) const {
1826 if (Op.getOperand(0).getValueType() != MVT::f128) {
1827 return Op; // It's legal except when f128 is involved.
1828 }
1831 RTLIB::Libcall LC;
1832 if (IsSigned)
1833 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
1834 else
1835 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
1837 return LowerF128ToCall(Op, DAG, LC);
1841 AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
1842 SelectionDAG &DAG) const {
1843 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
1844 // we make that distinction here.
1846 // We support the small memory model for now.
1847 assert(getTargetMachine().getCodeModel() == CodeModel::Small);
1849 EVT PtrVT = getPointerTy();
1850 DebugLoc dl = Op.getDebugLoc();
1851 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
1852 const GlobalValue *GV = GN->getGlobal();
1853 unsigned Alignment = GV->getAlignment();
1854 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1856 if (GV->isWeakForLinker() && RelocM == Reloc::Static) {
1857 // Weak symbols can't use ADRP/ADD pair since they should evaluate to
1858 // zero when undefined. In PIC mode the GOT can take care of this, but in
1859 // absolute mode we use a constant pool load.
1860 SDValue PoolAddr;
1861 PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
1862 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
1863 AArch64II::MO_NO_FLAG),
1864 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
1865 AArch64II::MO_LO12),
1866 DAG.getConstant(8, MVT::i32));
1867 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
1868 MachinePointerInfo::getConstantPool(),
1869 /*isVolatile=*/ false,
1870 /*isNonTemporal=*/ true,
1871 /*isInvariant=*/ true, 8);
1872 if (GN->getOffset() != 0)
1873 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
1874 DAG.getConstant(GN->getOffset(), PtrVT));
1879 if (Alignment == 0) {
1880 const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
1881 if (GVPtrTy->getElementType()->isSized()) {
1883 Alignment = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
1884 } else {
1885 // Be conservative if we can't guess, not that it really matters:
1886 // functions and labels aren't valid for loads, and the methods used to
1887 // actually calculate an address work with any alignment.
1888 Alignment = 1;
1889 }
1890 }
1892 unsigned char HiFixup, LoFixup;
1893 bool UseGOT = Subtarget->GVIsIndirectSymbol(GV, RelocM);
1896 HiFixup = AArch64II::MO_GOT;
1897 LoFixup = AArch64II::MO_GOT_LO12;
1900 HiFixup = AArch64II::MO_NO_FLAG;
1901 LoFixup = AArch64II::MO_LO12;
1904 // AArch64's small model demands the following sequence:
1905 // ADRP x0, somewhere
1906 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
1907 SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
1908 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1909 HiFixup),
1910 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1911 LoFixup),
1912 DAG.getConstant(Alignment, MVT::i32));
1915 GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
1919 if (GN->getOffset() != 0)
1920 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
1921 DAG.getConstant(GN->getOffset(), PtrVT));
1926 SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
1927 SDValue DescAddr,
1928 DebugLoc DL,
1929 SelectionDAG &DAG) const {
1930 EVT PtrVT = getPointerTy();
1932 // The function we need to call is simply the first entry in the GOT for this
1933 // descriptor, load it in preparation.
1934 SDValue Func, Chain;
1935 Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
1936 DescAddr);
1938 // The function takes only one argument: the address of the descriptor
1939 // itself, in X0.
1940 SDValue Glue;
1941 Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
1942 Glue = Chain.getValue(1);
1944 // Finally, there's a special calling-convention which means that the lookup
1945 // must preserve all registers (except X0, obviously).
1946 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
1947 const AArch64RegisterInfo *A64RI
1948 = static_cast<const AArch64RegisterInfo *>(TRI);
1949 const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();
1951 // We're now ready to populate the argument list, as with a normal call:
1952 std::vector<SDValue> Ops;
1953 Ops.push_back(Chain);
1954 Ops.push_back(Func);
1955 Ops.push_back(SymAddr);
1956 Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
1957 Ops.push_back(DAG.getRegisterMask(Mask));
1958 Ops.push_back(Glue);
1960 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1961 Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
1962 Ops.size());
1963 Glue = Chain.getValue(1);
1965 // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it
1966 // back to the generic handling code.
1967 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
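// Together with the descriptor address built by our callers, this should
// produce the standard TLS descriptor calling sequence, roughly:
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, #:tlsdesc_lo12:var]
//   add  x0, x0, #:tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1
// leaving the offset of "var" from TPIDR_EL0 in x0.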
1971 AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
1972 SelectionDAG &DAG) const {
1973 assert(Subtarget->isTargetELF() &&
1974 "TLS not implemented for non-ELF targets");
1975 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1977 TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
1979 SDValue TPOff;
1980 EVT PtrVT = getPointerTy();
1981 DebugLoc DL = Op.getDebugLoc();
1982 const GlobalValue *GV = GA->getGlobal();
1984 SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
1986 if (Model == TLSModel::InitialExec) {
1987 TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
1988 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
1989 AArch64II::MO_GOTTPREL),
1990 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
1991 AArch64II::MO_GOTTPREL_LO12),
1992 DAG.getConstant(8, MVT::i32));
1993 TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
1994 TPOff);
1995 } else if (Model == TLSModel::LocalExec) {
1996 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
1997 AArch64II::MO_TPREL_G1);
1998 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
1999 AArch64II::MO_TPREL_G0_NC);
2001 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2002 DAG.getTargetConstant(0, MVT::i32)), 0);
2003 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
2004 TPOff, LoVar,
2005 DAG.getTargetConstant(0, MVT::i32)), 0);
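// This pair should select to MOVZ/MOVK with TPREL relocations, e.g.:
//   movz x0, #:tprel_g1:var
//   movk x0, #:tprel_g0_nc:var
// giving the offset of "var" from the thread pointer TPIDR_EL0.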
2006 } else if (Model == TLSModel::GeneralDynamic) {
2007 // Accesses used in this sequence go via the TLS descriptor which lives in
2008 // the GOT. Prepare an address we can use to handle this.
2009 SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2010 AArch64II::MO_TLSDESC);
2011 SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2012 AArch64II::MO_TLSDESC_LO12);
2013 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2014 HiDesc, LoDesc,
2015 DAG.getConstant(8, MVT::i32));
2016 SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
2018 TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2019 } else if (Model == TLSModel::LocalDynamic) {
2020 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
2021 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
2022 // the beginning of the module's TLS region, followed by a DTPREL offset
2023 // calculation.
2025 // These accesses will need deduplicating if there's more than one.
2026 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
2027 .getInfo<AArch64MachineFunctionInfo>();
2028 MFI->incNumLocalDynamicTLSAccesses();
2031 // Get the location of _TLS_MODULE_BASE_:
2032 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2033 AArch64II::MO_TLSDESC);
2034 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2035 AArch64II::MO_TLSDESC_LO12);
2036 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2037 HiDesc, LoDesc,
2038 DAG.getConstant(8, MVT::i32));
2039 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
2041 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2043 // Get the variable's offset from _TLS_MODULE_BASE_
2044 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2045 AArch64II::MO_DTPREL_G1);
2046 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2047 AArch64II::MO_DTPREL_G0_NC);
2049 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2050 DAG.getTargetConstant(0, MVT::i32)), 0);
2051 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
2052 TPOff, LoVar,
2053 DAG.getTargetConstant(0, MVT::i32)), 0);
2055 llvm_unreachable("Unsupported TLS access model");
2058 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
2062 AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2063 bool IsSigned) const {
2064 if (Op.getValueType() != MVT::f128) {
2065 return Op; // Legal for everything except f128.
2066 }
2069 RTLIB::Libcall LC;
2070 if (IsSigned)
2071 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2072 else
2073 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2075 return LowerF128ToCall(Op, DAG, LC);
2080 AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2081 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2082 DebugLoc dl = JT->getDebugLoc();
2084 // When compiling PIC, jump tables get put in the code section so a static
2085 // relocation-style is acceptable for both cases.
2086 return DAG.getNode(AArch64ISD::WrapperSmall, dl, getPointerTy(),
2087 DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()),
2088 DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
2089 AArch64II::MO_LO12),
2090 DAG.getConstant(1, MVT::i32));
2093 // (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
2095 AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
2096 DebugLoc dl = Op.getDebugLoc();
2097 SDValue LHS = Op.getOperand(0);
2098 SDValue RHS = Op.getOperand(1);
2099 SDValue IfTrue = Op.getOperand(2);
2100 SDValue IfFalse = Op.getOperand(3);
2101 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2103 if (LHS.getValueType() == MVT::f128) {
2104 // f128 comparisons are lowered to libcalls, but slot in nicely here
2106 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2108 // If softenSetCCOperands returned a scalar, we need to compare the result
2109 // against zero to select between true and false values.
2110 if (RHS.getNode() == 0) {
2111 RHS = DAG.getConstant(0, LHS.getValueType());
2116 if (LHS.getValueType().isInteger()) {
2118 SDValue A64cc;
2119 // Integers are handled in a separate function because the combinations of
2120 // immediates and tests can get hairy and we may want to fiddle things.
2121 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2123 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2124 CmpOp, IfTrue, IfFalse, A64cc);
2127 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2128 // conditional branch, hence FPCCToA64CC can set a second test, where either
2129 // passing is sufficient.
2130 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2131 CondCode = FPCCToA64CC(CC, Alternative);
2132 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2133 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2134 DAG.getCondCode(CC));
2135 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
2136 Op.getValueType(),
2137 SetCC, IfTrue, IfFalse, A64cc);
2139 if (Alternative != A64CC::Invalid) {
2140 A64cc = DAG.getConstant(Alternative, MVT::i32);
2141 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2142 SetCC, IfTrue, A64SELECT_CC, A64cc);
2146 return A64SELECT_CC;
2149 // (SELECT testbit, iftrue, iffalse)
2151 AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2152 DebugLoc dl = Op.getDebugLoc();
2153 SDValue TheBit = Op.getOperand(0);
2154 SDValue IfTrue = Op.getOperand(1);
2155 SDValue IfFalse = Op.getOperand(2);
2157 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
2158 // that as the consumer we are responsible for ignoring rubbish in higher
2159 // bits.
2160 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
2161 DAG.getConstant(1, MVT::i32));
2162 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
2163 DAG.getConstant(0, TheBit.getValueType()),
2164 DAG.getCondCode(ISD::SETNE));
2166 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2167 A64CMP, IfTrue, IfFalse,
2168 DAG.getConstant(A64CC::NE, MVT::i32));
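// In other words, (select bit, t, f) is handled as if it were
// (select_cc (and bit, 1), 0, t, f, setne), reusing the normal
// conditional-select path.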
2171 // (SETCC lhs, rhs, condcode)
2173 AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2174 DebugLoc dl = Op.getDebugLoc();
2175 SDValue LHS = Op.getOperand(0);
2176 SDValue RHS = Op.getOperand(1);
2177 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2178 EVT VT = Op.getValueType();
2180 if (LHS.getValueType() == MVT::f128) {
2181 // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
2182 // for the rest of the function (some i32 or i64 values).
2183 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2185 // If softenSetCCOperands returned a scalar, use it.
2186 if (RHS.getNode() == 0) {
2187 assert(LHS.getValueType() == Op.getValueType() &&
2188 "Unexpected setcc expansion!");
2189 return LHS;
2190 }
2191 }
2193 if (LHS.getValueType().isInteger()) {
2195 SDValue A64cc;
2196 // Integers are handled in a separate function because the combinations of
2197 // immediates and tests can get hairy and we may want to fiddle things.
2198 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2200 return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2201 CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
2202 A64cc);
2205 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2206 // conditional branch, hence FPCCToA64CC can set a second test, where either
2207 // passing is sufficient.
2208 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2209 CondCode = FPCCToA64CC(CC, Alternative);
2210 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2211 SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2212 DAG.getCondCode(CC));
2213 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2214 CmpOp, DAG.getConstant(1, VT),
2215 DAG.getConstant(0, VT), A64cc);
2217 if (Alternative != A64CC::Invalid) {
2218 A64cc = DAG.getConstant(Alternative, MVT::i32);
2219 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
2220 DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
2223 return A64SELECT_CC;
2227 AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
2228 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
2229 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2231 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
2232 // rather than just 8.
2233 return DAG.getMemcpy(Op.getOperand(0), Op.getDebugLoc(),
2234 Op.getOperand(1), Op.getOperand(2),
2235 DAG.getConstant(32, MVT::i32), 8, false, false,
2236 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
2240 AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2241 // The layout of the va_list struct is specified in the AArch64 Procedure Call
2242 // Standard, section B.3.
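// That layout is, roughly:
//   struct va_list {
//     void *__stack;   // next stacked argument
//     void *__gr_top;  // end of the GP register save area
//     void *__vr_top;  // end of the FP/SIMD register save area
//     int __gr_offs;   // negative offset from __gr_top to the next GP arg
//     int __vr_offs;   // negative offset from __vr_top to the next FP arg
//   };
// which is why the stores below use offsets 0, 8, 16, 24 and 28.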
2243 MachineFunction &MF = DAG.getMachineFunction();
2244 AArch64MachineFunctionInfo *FuncInfo
2245 = MF.getInfo<AArch64MachineFunctionInfo>();
2246 DebugLoc DL = Op.getDebugLoc();
2248 SDValue Chain = Op.getOperand(0);
2249 SDValue VAList = Op.getOperand(1);
2250 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2251 SmallVector<SDValue, 4> MemOps;
2253 // void *__stack at offset 0
2254 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
2255 getPointerTy());
2256 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
2257 MachinePointerInfo(SV), false, false, 0));
2259 // void *__gr_top at offset 8
2260 int GPRSize = FuncInfo->getVariadicGPRSize();
2261 if (GPRSize > 0) {
2262 SDValue GRTop, GRTopAddr;
2264 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2265 DAG.getConstant(8, getPointerTy()));
2267 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
2268 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
2269 DAG.getConstant(GPRSize, getPointerTy()));
2271 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
2272 MachinePointerInfo(SV, 8),
2273 false, false, 0));
2274 }
2276 // void *__vr_top at offset 16
2277 int FPRSize = FuncInfo->getVariadicFPRSize();
2278 if (FPRSize > 0) {
2279 SDValue VRTop, VRTopAddr;
2280 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2281 DAG.getConstant(16, getPointerTy()));
2283 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
2284 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
2285 DAG.getConstant(FPRSize, getPointerTy()));
2287 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
2288 MachinePointerInfo(SV, 16),
2289 false, false, 0));
2290 }
2292 // int __gr_offs at offset 24
2293 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2294 DAG.getConstant(24, getPointerTy()));
2295 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
2296 GROffsAddr, MachinePointerInfo(SV, 24),
2297 false, false, 0));
2299 // int __vr_offs at offset 28
2300 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2301 DAG.getConstant(28, getPointerTy()));
2302 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
2303 VROffsAddr, MachinePointerInfo(SV, 28),
2304 false, false, 0));
2306 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
2307 MemOps.size());
2311 AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2312 switch (Op.getOpcode()) {
2313 default: llvm_unreachable("Don't know how to custom lower this!");
2314 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
2315 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
2316 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
2317 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
2318 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
2319 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
2320 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
2321 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
2322 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
2323 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
2325 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
2326 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
2327 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
2328 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
2329 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
2330 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
2331 case ISD::SELECT: return LowerSELECT(Op, DAG);
2332 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
2333 case ISD::SETCC: return LowerSETCC(Op, DAG);
2334 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
2335 case ISD::VASTART: return LowerVASTART(Op, DAG);
2341 static SDValue PerformANDCombine(SDNode *N,
2342 TargetLowering::DAGCombinerInfo &DCI) {
2344 SelectionDAG &DAG = DCI.DAG;
2345 DebugLoc DL = N->getDebugLoc();
2346 EVT VT = N->getValueType(0);
2348 // We're looking for an AND/SRL pair that together form an unsigned
2349 // bitfield extract (UBFX).
2350 if (VT != MVT::i32 && VT != MVT::i64)
2351 return SDValue();
2353 if (!isa<ConstantSDNode>(N->getOperand(1)))
2354 return SDValue();
2356 uint64_t TruncMask = N->getConstantOperandVal(1);
2357 if (!isMask_64(TruncMask))
2358 return SDValue();
2360 uint64_t Width = CountPopulation_64(TruncMask);
2361 SDValue Shift = N->getOperand(0);
2363 if (Shift.getOpcode() != ISD::SRL)
2364 return SDValue();
2366 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
2367 return SDValue();
2368 uint64_t LSB = Shift->getConstantOperandVal(1);
2370 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
2371 return SDValue();
2373 return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
2374 DAG.getConstant(LSB, MVT::i64),
2375 DAG.getConstant(LSB + Width - 1, MVT::i64));
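// For example, with i32 operands, (and (srl x, 11), 0xff) extracts the 8-bit
// field starting at bit 11, i.e. a UBFX with LSB 11 and width 8.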
2378 static SDValue PerformATOMIC_FENCECombine(SDNode *FenceNode,
2379 TargetLowering::DAGCombinerInfo &DCI) {
2380 // An atomic operation followed by an acquiring atomic fence can be reduced to
2381 // an acquiring load. The atomic operation provides a convenient pointer to
2382 // load from. If the original operation was a load anyway we can actually
2383 // combine the two operations into an acquiring load.
2384 SelectionDAG &DAG = DCI.DAG;
2385 SDValue AtomicOp = FenceNode->getOperand(0);
2386 AtomicSDNode *AtomicNode = dyn_cast<AtomicSDNode>(AtomicOp);
2388 // A fence on its own can't be optimised.
2389 if (!AtomicNode)
2390 return SDValue();
2392 AtomicOrdering FenceOrder
2393 = static_cast<AtomicOrdering>(FenceNode->getConstantOperandVal(1));
2394 SynchronizationScope FenceScope
2395 = static_cast<SynchronizationScope>(FenceNode->getConstantOperandVal(2));
2397 if (FenceOrder != Acquire || FenceScope != AtomicNode->getSynchScope())
2398 return SDValue();
2400 // If the original operation was an ATOMIC_LOAD then we'll be replacing it, so
2401 // the chain we use should be its input, otherwise we'll put our store after
2402 // it so we use its output chain.
2403 SDValue Chain = AtomicNode->getOpcode() == ISD::ATOMIC_LOAD ?
2404 AtomicNode->getChain() : AtomicOp;
2406 // We have an acquire fence with a handy atomic operation nearby, we can
2407 // convert the fence into a load-acquire, discarding the result.
2408 DebugLoc DL = FenceNode->getDebugLoc();
2409 SDValue Op = DAG.getAtomic(ISD::ATOMIC_LOAD, DL, AtomicNode->getMemoryVT(),
2410 AtomicNode->getValueType(0),
2411 Chain, // Chain
2412 AtomicOp.getOperand(1), // Pointer
2413 AtomicNode->getMemOperand(), Acquire,
2414 FenceScope);
2416 if (AtomicNode->getOpcode() == ISD::ATOMIC_LOAD)
2417 DAG.ReplaceAllUsesWith(AtomicNode, Op.getNode());
2419 return Op.getValue(1);
2422 static SDValue PerformATOMIC_STORECombine(SDNode *N,
2423 TargetLowering::DAGCombinerInfo &DCI) {
2424 // A releasing atomic fence followed by an atomic store can be combined into a
2425 // single store operation.
2426 SelectionDAG &DAG = DCI.DAG;
2427 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(N);
2428 SDValue FenceOp = AtomicNode->getOperand(0);
2430 if (FenceOp.getOpcode() != ISD::ATOMIC_FENCE)
2431 return SDValue();
2433 AtomicOrdering FenceOrder
2434 = static_cast<AtomicOrdering>(FenceOp->getConstantOperandVal(1));
2435 SynchronizationScope FenceScope
2436 = static_cast<SynchronizationScope>(FenceOp->getConstantOperandVal(2));
2438 if (FenceOrder != Release || FenceScope != AtomicNode->getSynchScope())
2439 return SDValue();
2441 DebugLoc DL = AtomicNode->getDebugLoc();
2442 return DAG.getAtomic(ISD::ATOMIC_STORE, DL, AtomicNode->getMemoryVT(),
2443 FenceOp.getOperand(0), // Chain
2444 AtomicNode->getOperand(1), // Pointer
2445 AtomicNode->getOperand(2), // Value
2446 AtomicNode->getMemOperand(), Release,
2447 FenceScope);
2450 /// For a true bitfield insert, the bits getting into that contiguous mask
2451 /// should come from the low part of an existing value: they must be formed from
2452 /// a compatible SHL operation (unless they're already low). This function
2453 /// checks that condition and returns the least-significant bit that's
2454 /// intended. If the operation is not a field preparation, -1 is returned.
2455 static int32_t getLSBForBFI(SelectionDAG &DAG, DebugLoc DL, EVT VT,
2456 SDValue &MaskedVal, uint64_t Mask) {
2457 if (!isShiftedMask_64(Mask))
2458 return -1;
2460 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
2461 // instruction. BFI will do a left-shift by LSB before applying the mask we've
2462 // spotted, so in general we should pre-emptively "undo" that by making sure
2463 // the incoming bits have had a right-shift applied to them.
2465 // This right shift, however, will combine with existing left/right shifts. In
2466 // the simplest case of a completely straight bitfield operation, it will be
2467 // expected to completely cancel out with an existing SHL. More complicated
2468 // cases (e.g. bitfield to bitfield copy) may still need a real shift before
2471 uint64_t LSB = CountTrailingZeros_64(Mask);
2472 int64_t ShiftRightRequired = LSB;
2473 if (MaskedVal.getOpcode() == ISD::SHL &&
2474 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
2475 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
2476 MaskedVal = MaskedVal.getOperand(0);
2477 } else if (MaskedVal.getOpcode() == ISD::SRL &&
2478 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
2479 ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
2480 MaskedVal = MaskedVal.getOperand(0);
2483 if (ShiftRightRequired > 0)
2484 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
2485 DAG.getConstant(ShiftRightRequired, MVT::i64));
2486 else if (ShiftRightRequired < 0) {
2487 // We could actually end up with a residual left shift, for example with
2488 // "struc.bitfield = val << 1".
2489 MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
2490 DAG.getConstant(-ShiftRightRequired, MVT::i64));
2496 /// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
2497 /// a mask and an extension. Returns true if a BFI was found and provides
2498 /// information on its surroundings.
2499 static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
2500 bool &Extended) {
2502 if (N.getOpcode() == ISD::ZERO_EXTEND) {
2504 N = N.getOperand(0);
2507 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
2508 Mask = N->getConstantOperandVal(1);
2509 N = N.getOperand(0);
2511 // Mask is the whole width.
2512 Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
2515 if (N.getOpcode() == AArch64ISD::BFI) {
2523 /// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
2524 /// is roughly equivalent to (and (BFI ...), mask). This form is used because it
2525 /// can often be further combined with a larger mask. Ultimately, we want mask
2526 /// to be 2^32-1 or 2^64-1 so the AND can be skipped.
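/// For example, on i32, (or (and X, 0xffff00ff), (and (shl Y, 8), 0xff00))
/// inserts the low byte of Y into bits 8-15 of X, i.e. a masked BFI of width 8
/// at LSB 8, and needs no trailing AND because the two masks together cover
/// the whole register.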
2527 static SDValue tryCombineToBFI(SDNode *N,
2528 TargetLowering::DAGCombinerInfo &DCI,
2529 const AArch64Subtarget *Subtarget) {
2530 SelectionDAG &DAG = DCI.DAG;
2531 DebugLoc DL = N->getDebugLoc();
2532 EVT VT = N->getValueType(0);
2534 assert(N->getOpcode() == ISD::OR && "Unexpected root");
2536 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
2537 // abandon the effort.
2538 SDValue LHS = N->getOperand(0);
2539 if (LHS.getOpcode() != ISD::AND)
2543 if (isa<ConstantSDNode>(LHS.getOperand(1)))
2544 LHSMask = LHS->getConstantOperandVal(1);
2548 // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
2549 // is or abandon the effort.
2550 SDValue RHS = N->getOperand(1);
2551 if (RHS.getOpcode() != ISD::AND)
2555 if (isa<ConstantSDNode>(RHS.getOperand(1)))
2556 RHSMask = RHS->getConstantOperandVal(1);
2560 // Can't do anything if the masks are incompatible.
2561 if (LHSMask & RHSMask)
2564 // Now we need one of the masks to be a contiguous field. Without loss of
2565 // generality that should be the RHS one.
2566 SDValue Bitfield = LHS.getOperand(0);
2567 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
2568 // We know that LHS is a candidate new value, and RHS isn't already a better
2570 std::swap(LHS, RHS);
2571 std::swap(LHSMask, RHSMask);
2574 // We've done our best to put the right operands in the right places, all we
2575 // can do now is check whether a BFI exists.
2576 Bitfield = RHS.getOperand(0);
2577 int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
2581 uint32_t Width = CountPopulation_64(RHSMask);
2582 assert(Width && "Expected non-zero bitfield width");
2584 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
2585 LHS.getOperand(0), Bitfield,
2586 DAG.getConstant(LSB, MVT::i64),
2587 DAG.getConstant(Width, MVT::i64));
2590 if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
2593 return DAG.getNode(ISD::AND, DL, VT, BFI,
2594 DAG.getConstant(LHSMask | RHSMask, VT));
2597 /// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
2598 /// original input. This is surprisingly common because SROA splits things up
2599 /// into i8 chunks, so the originally detected MaskedBFI may actually only act
2600 /// on the low (say) byte of a word. This is then orred into the rest of the
2601 /// word afterwards.
2603 /// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
2605 /// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
2606 /// MaskedBFI. We can also deal with a certain amount of extend/truncate being
2608 static SDValue tryCombineToLargerBFI(SDNode *N,
2609 TargetLowering::DAGCombinerInfo &DCI,
2610 const AArch64Subtarget *Subtarget) {
2611 SelectionDAG &DAG = DCI.DAG;
2612 DebugLoc DL = N->getDebugLoc();
2613 EVT VT = N->getValueType(0);
2615 // First job is to hunt for a MaskedBFI on either the left or right. Swap
2616 // operands if it's actually on the right.
2618 SDValue PossExtraMask;
2619 uint64_t ExistingMask = 0;
2620 bool Extended = false;
2621 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
2622 PossExtraMask = N->getOperand(1);
2623 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
2624 PossExtraMask = N->getOperand(0);
2628 // We can only combine a BFI with another compatible mask.
2629 if (PossExtraMask.getOpcode() != ISD::AND ||
2630 !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
2633 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
2635 // Masks must be compatible.
2636 if (ExtraMask & ExistingMask)
2639 SDValue OldBFIVal = BFI.getOperand(0);
2640 SDValue NewBFIVal = BFI.getOperand(1);
2642 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
2643 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
2644 // need to be made compatible.
2645 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
2646 && "Invalid types for BFI");
2647 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
2648 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
2651 // We need the MaskedBFI to be combined with a mask of the *same* value.
2652 if (PossExtraMask.getOperand(0) != OldBFIVal)
2655 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
2656 OldBFIVal, NewBFIVal,
2657 BFI.getOperand(2), BFI.getOperand(3));
2659 // If the masking is trivial, we don't need to create it.
2660 if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
2663 return DAG.getNode(ISD::AND, DL, VT, BFI,
2664 DAG.getConstant(ExtraMask | ExistingMask, VT));
2667 /// An EXTR instruction is made up of two shifts, ORed together. This helper
2668 /// searches for and classifies those shifts.
2669 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
2670 bool &FromHi) {
2671 if (N.getOpcode() == ISD::SHL)
2672 FromHi = false;
2673 else if (N.getOpcode() == ISD::SRL)
2674 FromHi = true;
2675 else
2676 return false;
2678 if (!isa<ConstantSDNode>(N.getOperand(1)))
2679 return false;
2681 ShiftAmount = N->getConstantOperandVal(1);
2682 Src = N->getOperand(0);
2683 return true;
2686 /// EXTR instruction extracts a contiguous chunk of bits from two existing
2687 /// registers viewed as a high/low pair. This function looks for the pattern:
2688 /// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
2689 /// EXTR. Can't quite be done in TableGen because the two immediates aren't
2690 /// independent.
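/// For example, on i32, (or (shl X, 24), (srl Y, 8)) becomes EXTR X, Y, #8:
/// the low 8 bits of X (now at the top) followed by the high 24 bits of Y,
/// which is a rotate when X == Y.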
2691 static SDValue tryCombineToEXTR(SDNode *N,
2692 TargetLowering::DAGCombinerInfo &DCI) {
2693 SelectionDAG &DAG = DCI.DAG;
2694 DebugLoc DL = N->getDebugLoc();
2695 EVT VT = N->getValueType(0);
2697 assert(N->getOpcode() == ISD::OR && "Unexpected root");
2699 if (VT != MVT::i32 && VT != MVT::i64)
2703 uint32_t ShiftLHS = 0;
2705 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
2709 uint32_t ShiftRHS = 0;
2711 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
2714 // If they're both trying to come from the high part of the register, they're
2715 // not really an EXTR.
2716 if (LHSFromHi == RHSFromHi)
2719 if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
2723 std::swap(LHS, RHS);
2724 std::swap(ShiftLHS, ShiftRHS);
2727 return DAG.getNode(AArch64ISD::EXTR, DL, VT,
2729 DAG.getConstant(ShiftRHS, MVT::i64));
2732 /// Target-specific dag combine xforms for ISD::OR
2733 static SDValue PerformORCombine(SDNode *N,
2734 TargetLowering::DAGCombinerInfo &DCI,
2735 const AArch64Subtarget *Subtarget) {
2737 SelectionDAG &DAG = DCI.DAG;
2738 EVT VT = N->getValueType(0);
2740 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
2743 // Attempt to recognise bitfield-insert operations.
2744 SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
2748 // Attempt to combine an existing MaskedBFI operation into one with a larger
2750 Res = tryCombineToLargerBFI(N, DCI, Subtarget);
2754 Res = tryCombineToEXTR(N, DCI);
2761 /// Target-specific dag combine xforms for ISD::SRA
2762 static SDValue PerformSRACombine(SDNode *N,
2763 TargetLowering::DAGCombinerInfo &DCI) {
2765 SelectionDAG &DAG = DCI.DAG;
2766 DebugLoc DL = N->getDebugLoc();
2767 EVT VT = N->getValueType(0);
2769 // We're looking for an SRA/SHL pair which form an SBFX.
2771 if (VT != MVT::i32 && VT != MVT::i64)
2772 return SDValue();
2774 if (!isa<ConstantSDNode>(N->getOperand(1)))
2775 return SDValue();
2777 uint64_t ExtraSignBits = N->getConstantOperandVal(1);
2778 SDValue Shift = N->getOperand(0);
2780 if (Shift.getOpcode() != ISD::SHL)
2781 return SDValue();
2783 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
2784 return SDValue();
2786 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
2787 uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
2788 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;
2790 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
2791 return SDValue();
2793 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
2794 DAG.getConstant(LSB, MVT::i64),
2795 DAG.getConstant(LSB + Width - 1, MVT::i64));
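// For example, on i32, (sra (shl x, 24), 24) sign-extends the low byte of x:
// ExtraSignBits is 24, so Width is 8 and LSB is 0, i.e. an SBFX of bits 0-7.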
2800 AArch64TargetLowering::PerformDAGCombine(SDNode *N,
2801 DAGCombinerInfo &DCI) const {
2802 switch (N->getOpcode()) {
2804 case ISD::AND: return PerformANDCombine(N, DCI);
2805 case ISD::ATOMIC_FENCE: return PerformATOMIC_FENCECombine(N, DCI);
2806 case ISD::ATOMIC_STORE: return PerformATOMIC_STORECombine(N, DCI);
2807 case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
2808 case ISD::SRA: return PerformSRACombine(N, DCI);
2813 AArch64TargetLowering::ConstraintType
2814 AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
2815 if (Constraint.size() == 1) {
2816 switch (Constraint[0]) {
2818 case 'w': // An FP/SIMD vector register
2819 return C_RegisterClass;
2820 case 'I': // Constant that can be used with an ADD instruction
2821 case 'J': // Constant that can be used with a SUB instruction
2822 case 'K': // Constant that can be used with a 32-bit logical instruction
2823 case 'L': // Constant that can be used with a 64-bit logical instruction
2824 case 'M': // Constant that can be used as a 32-bit MOV immediate
2825 case 'N': // Constant that can be used as a 64-bit MOV immediate
2826 case 'Y': // Floating point constant zero
2827 case 'Z': // Integer constant zero
2829 case 'Q': // A memory reference with base register and no offset
2831 case 'S': // A symbolic address
2836 // FIXME: Ump, Utf, Usa, Ush
2837 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
2838 // whatever they may be
2839 // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
2840 // Usa: An absolute symbolic address
2841 // Ush: The high part (bits 32:12) of a pc-relative symbolic address
2842 assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
2843 && Constraint != "Ush" && "Unimplemented constraints");
2845 return TargetLowering::getConstraintType(Constraint);
2848 TargetLowering::ConstraintWeight
2849 AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
2850 const char *Constraint) const {
2852 llvm_unreachable("Constraint weight unimplemented");
2856 AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2857 std::string &Constraint,
2858 std::vector<SDValue> &Ops,
2859 SelectionDAG &DAG) const {
2860 SDValue Result(0, 0);
2862 // Only length 1 constraints are C_Other.
2863 if (Constraint.size() != 1) return;
2865 // Only C_Other constraints get lowered like this. That means constants for us
2866 // so return early if there's no hope the constraint can be lowered.
2868 switch(Constraint[0]) {
2870 case 'I': case 'J': case 'K': case 'L':
2871 case 'M': case 'N': case 'Z': {
2872 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2876 uint64_t CVal = C->getZExtValue();
2879 switch (Constraint[0]) {
2881 // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J'
2882 // is a peculiarly useless SUB constraint.
2883 llvm_unreachable("Unimplemented C_Other constraint");
2889 if (A64Imms::isLogicalImm(32, CVal, Bits))
2893 if (A64Imms::isLogicalImm(64, CVal, Bits))
2902 Result = DAG.getTargetConstant(CVal, Op.getValueType());
2906 // An absolute symbolic address or label reference.
2907 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
2908 Result = DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getDebugLoc(),
2909 GA->getValueType(0));
2910 } else if (const BlockAddressSDNode *BA
2911 = dyn_cast<BlockAddressSDNode>(Op)) {
2912 Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
2913 BA->getValueType(0));
2914 } else if (const ExternalSymbolSDNode *ES
2915 = dyn_cast<ExternalSymbolSDNode>(Op)) {
2916 Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
2917 ES->getValueType(0));
2923 if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
2924 if (CFP->isExactlyValue(0.0)) {
2925 Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
2932 if (Result.getNode()) {
2933 Ops.push_back(Result);
2937 // It's an unknown constraint for us. Let generic code have a go.
2938 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2941 std::pair<unsigned, const TargetRegisterClass*>
2942 AArch64TargetLowering::getRegForInlineAsmConstraint(
2943 const std::string &Constraint,
2945 if (Constraint.size() == 1) {
2946 switch (Constraint[0]) {
2947 case 'r':
2948 if (VT.getSizeInBits() <= 32)
2949 return std::make_pair(0U, &AArch64::GPR32RegClass);
2950 else if (VT == MVT::i64)
2951 return std::make_pair(0U, &AArch64::GPR64RegClass);
2953 case 'w':
2954 if (VT == MVT::f16)
2955 return std::make_pair(0U, &AArch64::FPR16RegClass);
2956 else if (VT == MVT::f32)
2957 return std::make_pair(0U, &AArch64::FPR32RegClass);
2958 else if (VT == MVT::f64)
2959 return std::make_pair(0U, &AArch64::FPR64RegClass);
2960 else if (VT.getSizeInBits() == 64)
2961 return std::make_pair(0U, &AArch64::VPR64RegClass);
2962 else if (VT == MVT::f128)
2963 return std::make_pair(0U, &AArch64::FPR128RegClass);
2964 else if (VT.getSizeInBits() == 128)
2965 return std::make_pair(0U, &AArch64::VPR128RegClass);
2970 // Use the default implementation in TargetLowering to convert the register
2971 // constraint into a member of a register class.
2972 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);