1 //===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the AArch64 implementation of the TargetRegisterInfo
13 //===----------------------------------------------------------------------===//
16 #include "AArch64RegisterInfo.h"
17 #include "AArch64FrameLowering.h"
18 #include "AArch64MachineFunctionInfo.h"
19 #include "AArch64TargetMachine.h"
20 #include "MCTargetDesc/AArch64MCTargetDesc.h"
21 #include "llvm/ADT/BitVector.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
27 #define GET_REGINFO_TARGET_DESC
28 #include "AArch64GenRegisterInfo.inc"
32 AArch64RegisterInfo::AArch64RegisterInfo()
33 : AArch64GenRegisterInfo(AArch64::X30) {
37 AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
38 return CSR_PCS_SaveList;
42 AArch64RegisterInfo::getCallPreservedMask(CallingConv::ID) const {
43 return CSR_PCS_RegMask;
46 const uint32_t *AArch64RegisterInfo::getTLSDescCallPreservedMask() const {
47 return TLSDesc_RegMask;
50 const TargetRegisterClass *
51 AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
52 if (RC == &AArch64::FlagClassRegClass)
53 return &AArch64::GPR64RegClass;
// Build the set of registers the register allocator must never allocate:
// the stack pointer, the zero register, and the frame pointer, each in both
// its 64-bit (X) and 32-bit (W) views.
// NOTE(review): the embedded line numbers jump (63->65, 69->72, 73->79), so
// this listing is missing lines.  In particular TFI is declared but never
// used in the visible code, which suggests a dropped `if (TFI->hasFP(MF))`
// guard around the X29/W29 reservation; the trailing `return Reserved;` and
// closing brace are also absent here.  Confirm against upstream before
// relying on the unconditional-looking FP reservation below.
61 AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
62 BitVector Reserved(getNumRegs());
63 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
// Stack pointer: both register views.
65 Reserved.set(AArch64::XSP);
66 Reserved.set(AArch64::WSP);
// Zero register: both register views.
68 Reserved.set(AArch64::XZR);
69 Reserved.set(AArch64::WZR);
// Frame pointer: both register views (possibly conditional -- see note).
72 Reserved.set(AArch64::X29);
73 Reserved.set(AArch64::W29);
79 static bool hasFrameOffset(int opcode) {
80 return opcode != AArch64::LD1x2_8B && opcode != AArch64::LD1x3_8B &&
81 opcode != AArch64::LD1x4_8B && opcode != AArch64::ST1x2_8B &&
82 opcode != AArch64::ST1x3_8B && opcode != AArch64::ST1x4_8B &&
83 opcode != AArch64::LD1x2_16B && opcode != AArch64::LD1x3_16B &&
84 opcode != AArch64::LD1x4_16B && opcode != AArch64::ST1x2_16B &&
85 opcode != AArch64::ST1x3_16B && opcode != AArch64::ST1x4_16B;
// Rewrite an abstract frame-index operand of MI into a concrete base
// register plus immediate offset.  If the offset cannot be encoded in the
// instruction (bad scale, out of range, or no offset operand at all), the
// full address is first materialized into a scratch virtual register.
// NOTE(review): this listing is missing many original lines (the embedded
// line numbers have large gaps), so several guards, `else` arms, early
// returns, and closing braces are not visible -- e.g. the `CSI.empty()`
// check before indexing CSI, the declarations of Offset/FrameReg/MinCSFI/
// MaxCSFI/BaseReg, and the early return after the DBG_VALUE rewrite.
// Treat the control flow below as a partial view; confirm against upstream.
89 AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI,
91 unsigned FIOperandNum,
92 RegScavenger *RS) const {
93 assert(SPAdj == 0 && "Cannot deal with nonzero SPAdj yet");
94 MachineInstr &MI = *MBBI;
95 MachineBasicBlock &MBB = *MI.getParent();
96 MachineFunction &MF = *MBB.getParent();
97 MachineFrameInfo *MFI = MF.getFrameInfo();
98 const AArch64FrameLowering *TFI =
99 static_cast<const AArch64FrameLowering *>(MF.getTarget().getFrameLowering());
101 // In order to work out the base and offset for addressing, the FrameLowering
102 // code needs to know (sometimes) whether the instruction is storing/loading a
103 // callee-saved register, or whether it's a more generic
104 // operation. Fortunately the frame indices are used *only* for that purpose
105 // and are contiguous, so we can check here.
106 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
// Callee-saved frame indices are contiguous, so first/last bound the range.
111 MinCSFI = CSI[0].getFrameIdx();
112 MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
115 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
116 bool IsCalleeSaveOp = FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI;
// Ask the frame lowering for the base register and raw offset of this slot.
120 Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj,
122 // A vector load/store instruction doesn't have an offset operand.
123 bool HasOffsetOp = hasFrameOffset(MI.getOpcode());
// Fold in any immediate already present on the instruction.
125 Offset += MI.getOperand(FIOperandNum + 1).getImm();
127 // DBG_VALUE instructions have no real restrictions so they can be handled
129 if (MI.isDebugValue()) {
130 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*isDef=*/ false);
131 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
135 const AArch64InstrInfo &TII =
136 *static_cast<const AArch64InstrInfo*>(MF.getTarget().getInstrInfo());
137 int MinOffset, MaxOffset, OffsetScale;
// ADDxxi_lsl0_s (address computation) and offset-less ops have their own
// encoding constraints; other load/stores query the instruction's limits.
138 if (MI.getOpcode() == AArch64::ADDxxi_lsl0_s || !HasOffsetOp) {
143 // Load/store of a stack object
144 TII.getAddressConstraints(MI, OffsetScale, MinOffset, MaxOffset);
147 // There are two situations we don't use frame + offset directly in the
149 // (1) The offset can't really be scaled
150 // (2) Can't encode offset as it doesn't have an offset operand
151 if ((Offset % OffsetScale != 0 || Offset < MinOffset || Offset > MaxOffset) ||
152 (!HasOffsetOp && Offset != 0)) {
// Materialize FrameReg + Offset into a fresh scratch GPR64 and address
// through that instead.
154 MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
155 emitRegUpdate(MBB, MBBI, MBBI->getDebugLoc(), TII,
156 BaseReg, FrameReg, BaseReg, Offset);
161 // Negative offsets are expected if we address from FP, but for
162 // now this checks nothing has gone horribly wrong.
163 assert(Offset >= 0 && "Unexpected negative offset from SP");
// Encodable case: rewrite the operands in place with the scaled immediate.
165 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, true);
167 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset / OffsetScale);
171 AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
172 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
181 AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
182 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
183 const AArch64FrameLowering *AFI
184 = static_cast<const AArch64FrameLowering*>(TFI);
185 return AFI->useFPForAddressing(MF);