1 //===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the AArch64 target.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "aarch64-isel"
16 #include "AArch64InstrInfo.h"
17 #include "AArch64Subtarget.h"
18 #include "AArch64TargetMachine.h"
19 #include "Utils/AArch64BaseInfo.h"
20 #include "llvm/ADT/APSInt.h"
21 #include "llvm/CodeGen/SelectionDAGISel.h"
22 #include "llvm/IR/GlobalValue.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/raw_ostream.h"
28 //===--------------------------------------------------------------------===//
29 /// AArch64 specific code to select AArch64 machine instructions for
30 /// SelectionDAG operations.
// Instruction selector pass for AArch64: matches legalized SelectionDAG nodes
// to AArch64 machine instructions. Most matching comes from the TableGen'd
// code included below; the members here are the hand-written predicates and
// complex-pattern helpers that TableGen patterns call into.
34 class AArch64DAGToDAGISel : public SelectionDAGISel {
35   AArch64TargetMachine &TM;
36   const AArch64InstrInfo *TII;
38   /// Keep a pointer to the AArch64Subtarget around so that we can
39   /// make the right decision when generating code for different targets.
40   const AArch64Subtarget *Subtarget;
43   explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
44                                CodeGenOpt::Level OptLevel)
45     : SelectionDAGISel(tm, OptLevel), TM(tm),
46       TII(static_cast<const AArch64InstrInfo*>(TM.getInstrInfo())),
47       Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
50   virtual const char *getPassName() const {
51     return "AArch64 Instruction Selection";
54   // Include the pieces autogenerated from the target description.
55 #include "AArch64GenDAGISel.inc"
  /// Match an unsigned, scaled 12-bit load/store offset: the constant must be
  /// a multiple of MemSize and the scaled value (val / MemSize) must fit in
  /// 12 bits. On success UImm12 holds the scaled offset as an i64 constant.
57   template<unsigned MemSize>
58   bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) {
59     const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
60     if (!CN || CN->getZExtValue() % MemSize != 0
61         || CN->getZExtValue() / MemSize > 0xfff)
64     UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64);
  // Template shim forwarding the register width to the out-of-line
  // three-argument overload declared below.
68   template<unsigned RegWidth>
69   bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
70     return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
73   /// Used for pre-lowered address-reference nodes, so we already know
74   /// the fields match. This operand's job is simply to add an
75   /// appropriate shift operand (i.e. 0) to the MOVZ/MOVK instruction.
76   bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
78     Shift = CurDAG->getTargetConstant(0, MVT::i32);
82   bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);
84   bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
87   bool SelectInlineAsmMemoryOperand(const SDValue &Op,
89                                     std::vector<SDValue> &OutOps);
91   bool SelectLogicalImm(SDValue N, SDValue &Imm);
  // Template shim for TBZ/TBNZ bit-test operands; forwards RegWidth to the
  // out-of-line overload.
93   template<unsigned RegWidth>
94   bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) {
95     return SelectTSTBOperand(N, FixedPos, RegWidth);
98   bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);
  // Select one of the width-specific pseudo-instructions for an atomic node
  // based on its memory VT (i8/i16/i32/i64).
100   SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
103   /// Put the given constant into a pool and return a DAG which will give its
105   SDValue getConstantPoolItemAddress(SDLoc DL, const Constant *CV);
  // Constant-materialization strategies, tried in order by Select():
  // single MOVZ/MOVN/ORR, then a constant-pool (literal-pool) load.
107   SDNode *TrySelectToMoveImm(SDNode *N);
108   SDNode *LowerToFPLitPool(SDNode *Node);
109   SDNode *SelectToLitPool(SDNode *N);
111   SDNode* Select(SDNode*);
// Match the FP constant multiplier of an (fp_to_[su]int (fmul Val, C))
// pattern for a fixed-point FCVT[SU]: succeeds only when C is exactly a
// power of two, 2^fbits, with 1 <= fbits <= RegWidth. On success FixedPos
// receives (64 - fbits) as the instruction's scale operand.
117 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
119   const ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
120   if (!CN) return false;
122   // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
123   // is between 1 and 32 for a destination w-register, or 1 and 64 for an
126   // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
127   // want THIS_NODE to be 2^fbits. This is much easier to deal with using
131   // fbits is between 1 and 64 in the worst-case, which means the fmul
132   // could have 2^64 as an actual operand. Need 65 bits of precision.
133   APSInt IntVal(65, true);
  // Conversion must be exact: any rounding means C was not a power of two.
134   CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
136   // N.b. isPowerOf2 also checks for > 0.
137   if (!IsExact || !IntVal.isPowerOf2()) return false;
138   unsigned FBits = IntVal.logBase2();
140   // Checks above should have guaranteed that we haven't lost information in
141   // finding FBits, but it must still be in range.
142   if (FBits == 0 || FBits > RegWidth) return false;
144   FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32);
// Handle inline-assembly memory constraints: the operand is passed through
// unchanged (register-only addressing for now), and unknown constraint codes
// are fatal.
149 AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
151                                                   std::vector<SDValue> &OutOps) {
152   switch (ConstraintCode) {
153   default: llvm_unreachable("Unrecognised AArch64 memory constraint");
155     // FIXME: more freedom is actually permitted for 'm'. We can go
156     // hunting for a base and an offset if we want. Of course, since
157     // we don't really know how the operand is going to be used we're
158     // probably restricted to the load/store pair's simm7 as an offset
161     OutOps.push_back(Op);
// Match a floating-point constant that is exactly +0.0 (negative zero is
// rejected). The produced operand carries no information; it only satisfies
// the TableGen pattern's operand count.
168 AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
169   ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
170   if (!Imm || !Imm->getValueAPF().isPosZero())
173   // Doesn't actually carry any information, but keeps TableGen quiet.
174   Dummy = CurDAG->getTargetConstant(0, MVT::i32);
// Match an integer constant encodable as an AArch64 logical immediate
// (bitmask immediate) for the value's register width. On success Imm holds
// the encoded N:immr:imms bits produced by A64Imms::isLogicalImm.
178 bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
180   uint32_t RegWidth = N.getValueType().getSizeInBits();
182   ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
183   if (!CN) return false;
185   if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits))
188   Imm = CurDAG->getTargetConstant(Bits, MVT::i32);
// Try to materialise a ConstantSDNode in a single instruction, preferring in
// order: MOVZ, MOVN, a 32-bit MOVN widened to 64 bits, then ORR with the
// zero register for logical immediates. Returns the new machine node, with
// SUBREG_TO_REG appended when a 32-bit MOV feeds a 64-bit destination.
192 SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
195   EVT DestType = Node->getValueType(0);
196   unsigned DestWidth = DestType.getSizeInBits();
201   uint32_t LogicalBits;
203   uint64_t BitPat = cast<ConstantSDNode>(Node)->getZExtValue();
204   if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) {
206     MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii;
207   } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) {
209     MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii;
210   } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) {
211     // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we can
212     // use a 32-bit instruction: "movn w0, 0xedbc".
214     MOVOpcode = AArch64::MOVNwii;
215   } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) {
  // Logical immediates are emitted as ORR Rd, ZR, #imm — a move in effect.
216     MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi;
217     uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR;
219     return CurDAG->getMachineNode(MOVOpcode, dl, DestType,
220                                   CurDAG->getRegister(ZR, DestType),
221                                   CurDAG->getTargetConstant(LogicalBits, MVT::i32));
223   // Can't handle it in one instruction. There's scope for permitting two (or
224   // more) instructions, but that'll need more thought.
228   ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType,
229                                    CurDAG->getTargetConstant(UImm16, MVT::i32),
230                                    CurDAG->getTargetConstant(Shift, MVT::i32));
  // A 32-bit MOV into a 64-bit destination needs an explicit
  // SUBREG_TO_REG to model the implicit zeroing of the high half.
232   if (MOVType != DestType) {
233     ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
234                                      MVT::i64, MVT::i32, MVT::Other,
235                                      CurDAG->getTargetConstant(0, MVT::i64),
237                                      CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32));
// Place CV in the constant pool and build a DAG computing its address.
// Small code model: an ADRP/ADD-style WrapperSmall node pairing the page
// reference with its low-12-bit offset. Large code model: a full 64-bit
// absolute address materialised with MOVZ (G3) + three MOVKs (G2..G0, NC).
// Other code models are not supported.
244 AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL,
245                                                 const Constant *CV) {
246   EVT PtrVT = TLI->getPointerTy();
248   switch (TLI->getTargetMachine().getCodeModel()) {
249   case CodeModel::Small: {
251       TLI->getDataLayout()->getABITypeAlignment(CV->getType());
252     return CurDAG->getNode(
253         AArch64ISD::WrapperSmall, DL, PtrVT,
254         CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
255         CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_LO12),
256         CurDAG->getConstant(Alignment, MVT::i32));
258   case CodeModel::Large: {
    // Build the address 16 bits at a time, most-significant chunk first:
    // MOVZ writes bits [63:48]; each MOVK inserts the next lower chunk
    // without clobbering the rest (the _NC relocations skip overflow checks).
260     LitAddr = CurDAG->getMachineNode(
261         AArch64::MOVZxii, DL, PtrVT,
262         CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
263         CurDAG->getTargetConstant(0, MVT::i32));
264     LitAddr = CurDAG->getMachineNode(
265         AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
266         CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
267         CurDAG->getTargetConstant(0, MVT::i32));
268     LitAddr = CurDAG->getMachineNode(
269         AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
270         CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
271         CurDAG->getTargetConstant(0, MVT::i32));
272     LitAddr = CurDAG->getMachineNode(
273         AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
274         CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC),
275         CurDAG->getTargetConstant(0, MVT::i32));
276     return SDValue(LitAddr, 0);
279   llvm_unreachable("Only small and large code models supported now");
// Materialise an integer ConstantSDNode via a literal-pool load. A 64-bit
// value that fits in 32 bits is stored as a 32-bit pool entry and widened on
// load with ZEXTLOAD (unsigned fit) or SEXTLOAD (signed fit), saving pool
// space; otherwise a full-width NON_EXTLOAD is used. Returns the load node.
283 SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
285   uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
286   int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
287   EVT DestType = Node->getValueType(0);
289   // Since we may end up loading a 64-bit constant from a 32-bit entry the
290   // constant in the pool may have a different type to the eventual node.
291   ISD::LoadExtType Extension;
294   assert((DestType == MVT::i64 || DestType == MVT::i32)
295          && "Only expect integer constants at the moment");
297   if (DestType == MVT::i32) {
298     Extension = ISD::NON_EXTLOAD;
300   } else if (UnsignedVal <= UINT32_MAX) {
301     Extension = ISD::ZEXTLOAD;
303   } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) {
304     Extension = ISD::SEXTLOAD;
307     Extension = ISD::NON_EXTLOAD;
  // Create the pool entry at the (possibly narrower) memory type.
311   Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
312                                                   MemType.getSizeInBits()),
314   SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
315   unsigned Alignment = TLI->getDataLayout()->getABITypeAlignment(CV->getType());
317   return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
319                             MachinePointerInfo::getConstantPool(), MemType,
320                             /* isVolatile = */ false,
321                             /* isNonTemporal = */ false,
322                             Alignment).getNode();
// Materialise a ConstantFPSDNode via a literal-pool load. The pool entry is
// invariant (constants never change), which lets later passes hoist or CSE
// the load. Returns the load node.
325 SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
327   const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
328   EVT DestType = Node->getValueType(0);
330   unsigned Alignment = TLI->getDataLayout()->getABITypeAlignment(FV->getType());
331   SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);
333   return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
334                          MachinePointerInfo::getConstantPool(),
335                          /* isVolatile = */ false,
336                          /* isNonTemporal = */ false,
337                          /* isInvariant = */ true,
338                          Alignment).getNode();
// Match the mask operand of a single-bit test (for TBZ/TBNZ): the constant
// must be a power of two whose set bit is below RegWidth. On success
// FixedPos receives the tested bit's index as an i64 constant.
342 AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
344   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
345   if (!CN) return false;
347   uint64_t Val = CN->getZExtValue();
349   if (!isPowerOf2_64(Val)) return false;
351   unsigned TestedBit = Log2_64(Val);
352   // Checks above should have guaranteed that we haven't lost information in
353   // finding TestedBit, but it must still be in range.
354   if (TestedBit >= RegWidth) return false;
356   FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64);
// Lower an atomic node to the width-specific pseudo-instruction chosen from
// Op8/Op16/Op32/Op64 by the node's memory VT. Operands are copied across
// with two changes: the AtomicOrdering is appended as an extra immediate
// (consumed when the pseudo is expanded), and the chain is moved from the
// front to the back of the operand list.
360 SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
361                                           unsigned Op16,unsigned Op32,
363   // Mostly direct translation to the given operations, except that we preserve
364   // the AtomicOrdering for use later on.
365   AtomicSDNode *AN = cast<AtomicSDNode>(Node);
366   EVT VT = AN->getMemoryVT();
371   else if (VT == MVT::i16)
373   else if (VT == MVT::i32)
375   else if (VT == MVT::i64)
378     llvm_unreachable("Unexpected atomic operation");
  // Copy all operands except the chain (operand 0).
380   SmallVector<SDValue, 4> Ops;
381   for (unsigned i = 1; i < AN->getNumOperands(); ++i)
382     Ops.push_back(AN->getOperand(i));
384   Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
385   Ops.push_back(AN->getOperand(0)); // Chain moves to the end
387   return CurDAG->SelectNodeTo(Node, Op,
388                               AN->getValueType(0), MVT::Other,
389                               &Ops[0], Ops.size());
// Main selection entry point, invoked once per DAG node. Already-selected
// machine nodes are left alone; atomic read-modify-write/swap/cmpxchg ops go
// through SelectAtomic; FrameIndex, ConstantPool, Constant and ConstantFP get
// hand-written lowering; everything else falls through to the TableGen'd
// SelectCode matcher.
392 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
393   // Dump information about the Node being selected
394   DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");
396   if (Node->isMachineOpcode()) {
397     DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
401   switch (Node->getOpcode()) {
402   case ISD::ATOMIC_LOAD_ADD:
403     return SelectAtomic(Node,
404                         AArch64::ATOMIC_LOAD_ADD_I8,
405                         AArch64::ATOMIC_LOAD_ADD_I16,
406                         AArch64::ATOMIC_LOAD_ADD_I32,
407                         AArch64::ATOMIC_LOAD_ADD_I64);
408   case ISD::ATOMIC_LOAD_SUB:
409     return SelectAtomic(Node,
410                         AArch64::ATOMIC_LOAD_SUB_I8,
411                         AArch64::ATOMIC_LOAD_SUB_I16,
412                         AArch64::ATOMIC_LOAD_SUB_I32,
413                         AArch64::ATOMIC_LOAD_SUB_I64);
414   case ISD::ATOMIC_LOAD_AND:
415     return SelectAtomic(Node,
416                         AArch64::ATOMIC_LOAD_AND_I8,
417                         AArch64::ATOMIC_LOAD_AND_I16,
418                         AArch64::ATOMIC_LOAD_AND_I32,
419                         AArch64::ATOMIC_LOAD_AND_I64);
420   case ISD::ATOMIC_LOAD_OR:
421     return SelectAtomic(Node,
422                         AArch64::ATOMIC_LOAD_OR_I8,
423                         AArch64::ATOMIC_LOAD_OR_I16,
424                         AArch64::ATOMIC_LOAD_OR_I32,
425                         AArch64::ATOMIC_LOAD_OR_I64);
426   case ISD::ATOMIC_LOAD_XOR:
427     return SelectAtomic(Node,
428                         AArch64::ATOMIC_LOAD_XOR_I8,
429                         AArch64::ATOMIC_LOAD_XOR_I16,
430                         AArch64::ATOMIC_LOAD_XOR_I32,
431                         AArch64::ATOMIC_LOAD_XOR_I64);
432   case ISD::ATOMIC_LOAD_NAND:
433     return SelectAtomic(Node,
434                         AArch64::ATOMIC_LOAD_NAND_I8,
435                         AArch64::ATOMIC_LOAD_NAND_I16,
436                         AArch64::ATOMIC_LOAD_NAND_I32,
437                         AArch64::ATOMIC_LOAD_NAND_I64);
438   case ISD::ATOMIC_LOAD_MIN:
439     return SelectAtomic(Node,
440                         AArch64::ATOMIC_LOAD_MIN_I8,
441                         AArch64::ATOMIC_LOAD_MIN_I16,
442                         AArch64::ATOMIC_LOAD_MIN_I32,
443                         AArch64::ATOMIC_LOAD_MIN_I64);
444   case ISD::ATOMIC_LOAD_MAX:
445     return SelectAtomic(Node,
446                         AArch64::ATOMIC_LOAD_MAX_I8,
447                         AArch64::ATOMIC_LOAD_MAX_I16,
448                         AArch64::ATOMIC_LOAD_MAX_I32,
449                         AArch64::ATOMIC_LOAD_MAX_I64);
450   case ISD::ATOMIC_LOAD_UMIN:
451     return SelectAtomic(Node,
452                         AArch64::ATOMIC_LOAD_UMIN_I8,
453                         AArch64::ATOMIC_LOAD_UMIN_I16,
454                         AArch64::ATOMIC_LOAD_UMIN_I32,
455                         AArch64::ATOMIC_LOAD_UMIN_I64);
456   case ISD::ATOMIC_LOAD_UMAX:
457     return SelectAtomic(Node,
458                         AArch64::ATOMIC_LOAD_UMAX_I8,
459                         AArch64::ATOMIC_LOAD_UMAX_I16,
460                         AArch64::ATOMIC_LOAD_UMAX_I32,
461                         AArch64::ATOMIC_LOAD_UMAX_I64);
462   case ISD::ATOMIC_SWAP:
463     return SelectAtomic(Node,
464                         AArch64::ATOMIC_SWAP_I8,
465                         AArch64::ATOMIC_SWAP_I16,
466                         AArch64::ATOMIC_SWAP_I32,
467                         AArch64::ATOMIC_SWAP_I64);
468   case ISD::ATOMIC_CMP_SWAP:
469     return SelectAtomic(Node,
470                         AArch64::ATOMIC_CMP_SWAP_I8,
471                         AArch64::ATOMIC_CMP_SWAP_I16,
472                         AArch64::ATOMIC_CMP_SWAP_I32,
473                         AArch64::ATOMIC_CMP_SWAP_I64);
474   case ISD::FrameIndex: {
    // Frame addresses become "ADD Rd, <target-FI>, #0"; the frame index is
    // resolved to an SP-relative offset later, during frame lowering.
475     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
476     EVT PtrTy = TLI->getPointerTy();
477     SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
478     return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
479                                 TFI, CurDAG->getTargetConstant(0, PtrTy));
481   case ISD::ConstantPool: {
482     // Constant pools are fine, just create a Target entry.
483     ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Node);
484     const Constant *C = CN->getConstVal();
485     SDValue CP = CurDAG->getTargetConstantPool(C, CN->getValueType(0));
487     ReplaceUses(SDValue(Node, 0), CP);
490   case ISD::Constant: {
    // Integer constants, cheapest strategy first: zero register, then a
    // single move-immediate, then a literal-pool load.
492     if (cast<ConstantSDNode>(Node)->getZExtValue() == 0) {
493       // XZR and WZR are probably even better than an actual move: most of the
494       // time they can be folded into another instruction with *no* cost.
496       EVT Ty = Node->getValueType(0);
497       assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
498       uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
499       ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
501                                        Register, Ty).getNode();
504     // Next best option is a move-immediate, see if we can do that.
506       ResNode = TrySelectToMoveImm(Node);
512     // If even that fails we fall back to a lit-pool entry at the moment. Future
513     // tuning may change this to a sequence of MOVZ/MOVN/MOVK instructions.
514     ResNode = SelectToLitPool(Node);
515     assert(ResNode && "We need *some* way to materialise a constant");
517     // We want to continue selection at this point since the litpool access
518     // generated uses generic nodes for simplicity.
519     ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
523   case ISD::ConstantFP: {
524     if (A64Imms::isFPImm(cast<ConstantFPSDNode>(Node)->getValueAPF())) {
525       // FMOV will take care of it from TableGen
529     SDNode *ResNode = LowerToFPLitPool(Node);
530     ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
532     // We want to continue selection at this point since the litpool access
533     // generated uses generic nodes for simplicity.
538     break; // Let generic code handle it
  // Anything not handled above goes to the TableGen-generated matcher.
541   SDNode *ResNode = SelectCode(Node);
543   DEBUG(dbgs() << "=> ";
544         if (ResNode == NULL || ResNode == Node)
547           ResNode->dump(CurDAG);
553 /// This pass converts a legalized DAG into an AArch64-specific DAG, ready for
554 /// instruction scheduling.
// Factory used by the AArch64 target's pass setup; the caller takes
// ownership of the returned pass.
555 FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM,
556                                          CodeGenOpt::Level OptLevel) {
557   return new AArch64DAGToDAGISel(TM, OptLevel);