1 //===--------- MipsOptimizePICCall.cpp - Optimize PIC Calls ---------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass eliminates unnecessary instructions that set up $gp and replace
11 // instructions that load target function addresses with copy instructions.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "optimize-mips-pic-call"
18 #include "MCTargetDesc/MipsBaseInfo.h"
19 #include "MipsMachineFunction.h"
20 #include "MipsTargetMachine.h"
21 #include "llvm/ADT/ScopedHashTable.h"
22 #include "llvm/CodeGen/MachineDominators.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/Support/CommandLine.h"
// Command-line knobs for this pass.
// NOTE(review): the cl::init(...)/attribute arguments and the closing
// parentheses of both declarations appear truncated in this excerpt —
// confirm against the full file.
28 static cl::opt<bool> LoadTargetFromGOT("mips-load-target-from-got",
// When set, always load the callee address from the GOT; disables the
// register-reuse shortcut taken in visitNode (see the N >= 2 check).
30 cl::desc("Load target address from GOT"),
33 static cl::opt<bool> EraseGPOpnd("mips-erase-gp-opnd",
// Defaults to true: redundant $gp operands on calls may be erased.
34 cl::init(true), cl::desc("Erase GP Operand"),
// (count, register) pair: how many dominating loads of a callee address
// have been seen, and the virtual register written by the latest one.
38 typedef std::pair<unsigned, unsigned> CntRegP;
// Recycling allocator for the scoped hash-table nodes below.
39 typedef RecyclingAllocator<BumpPtrAllocator,
40 ScopedHashTableVal<const Value *, CntRegP> >
// NOTE(review): the line naming this typedef (presumably AllocatorTy,
// given its use below) appears truncated in this excerpt.
// Scoped hash table keyed by the GOT-entry Value; scopes are opened and
// closed to mirror the dominator-tree walk in runOnMachineFunction.
42 typedef ScopedHashTable<const Value *, CntRegP, DenseMapInfo<const Value *>,
43 AllocatorTy> ScopedHTType;
// Per-dominator-tree-node bookkeeping for the iterative pre-order walk.
// NOTE(review): the enclosing class/struct declaration line is not
// visible in this excerpt.
47 MBBInfo(MachineDomTreeNode *N);
48 const MachineDomTreeNode *getNode() const;
// True once preVisit has created this node's hash-table scope.
49 bool isVisited() const;
// Opens a new ScopedHT scope; undone by postVisit when the walk leaves
// this subtree.
50 void preVisit(ScopedHTType &ScopedHT);
54 MachineDomTreeNode *Node;
// Owned scope object; non-null doubles as the "visited" marker.
55 ScopedHTType::ScopeTy *HTScope;
// Machine-function pass that eliminates unnecessary $gp set-up and
// replaces repeated GOT loads of callee addresses with register reuse.
// NOTE(review): access specifiers, the declaration of ID, and the class's
// closing brace appear truncated in this excerpt.
58 class OptimizePICCall : public MachineFunctionPass {
60 OptimizePICCall(TargetMachine &tm) : MachineFunctionPass(ID) {}
62 virtual const char *getPassName() const { return "Mips OptimizePICCall"; }
64 bool runOnMachineFunction(MachineFunction &F);
66 void getAnalysisUsage(AnalysisUsage &AU) const {
// The dominator tree drives the pre-order traversal of basic blocks.
67 AU.addRequired<MachineDominatorTree>();
68 MachineFunctionPass::getAnalysisUsage(AU);
// Process the instructions of one basic block; returns true on change.
73 bool visitNode(MBBInfo &MBBI);
75 /// \brief Test if MI jumps to a function via a register.
77 /// Also, return the virtual register containing the target function's address
78 /// and the underlying object in Reg and Val respectively, if the function's
79 /// address can be resolved lazily.
80 bool isCallViaRegister(MachineInstr &MI, unsigned &Reg,
81 const Value *&Val) const;
83 /// \brief Return the number of instructions that dominate the current
84 /// instruction and load the function address from object Entry.
85 unsigned getCount(const Value *Entry);
87 /// \brief Return the destination virtual register of the last instruction
88 /// that loads from object Entry.
89 unsigned getReg(const Value *Entry);
91 /// \brief Update ScopedHT: increment Entry's count and record Reg.
92 void incCntAndSetReg(const Value *Entry, unsigned Reg);
// Maps GOT-entry objects to (count, register); scoped per dominator node.
94 ScopedHTType ScopedHT;
// Pass identification, replacement for typeid.
98 char OptimizePICCall::ID = 0;
99 } // end of anonymous namespace
101 /// Return the first MachineOperand of MI if it is a used virtual register.
102 static MachineOperand *getCallTargetRegOpnd(MachineInstr &MI) {
// An instruction with no operands cannot carry a call-target register.
// NOTE(review): the early-return statements (presumably returning null)
// and the final return of &MO appear truncated in this excerpt.
103 if (MI.getNumOperands() == 0)
106 MachineOperand &MO = MI.getOperand(0);
// Only a *use* of a virtual register qualifies as a call target here.
108 if (!MO.isReg() || !MO.isUse() ||
109 !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
115 /// Return type of register Reg.
116 static MVT::SimpleValueType getRegTy(unsigned Reg, MachineFunction &MF) {
117 const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
// The register classes reaching this helper carry exactly one value type,
// so dereferencing the first entry below is unambiguous.
118 assert(RC->vt_end() - RC->vt_begin() == 1);
119 return *RC->vt_begin();
122 /// Do the following transformation:
// Copy the current call-target register into $t9 (or $t9_64 when the
// register is 64-bit) and rewrite the call to read $t9 instead —
// presumably to satisfy the MIPS PIC convention that calls go through
// $t9; confirm against the ABI.
// NOTE(review): the example lines of this header comment and the
// .addReg(SrcReg) continuation of the BuildMI call appear truncated in
// this excerpt.
128 static void setCallTargetReg(MachineBasicBlock *MBB,
129 MachineBasicBlock::iterator I) {
130 MachineFunction &MF = *MBB->getParent();
131 const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
132 unsigned SrcReg = I->getOperand(0).getReg();
// i32 target register => 32-bit T9; otherwise the 64-bit counterpart.
133 unsigned DstReg = getRegTy(SrcReg, MF) == MVT::i32 ? Mips::T9 : Mips::T9_64;
134 BuildMI(*MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), DstReg)
136 I->getOperand(0).setReg(DstReg);
139 /// Search MI's operands for register GP and erase it.
140 static void eraseGPOpnd(MachineInstr &MI) {
// NOTE(review): the guard on the EraseGPOpnd flag, the operand removal
// inside the matching branch, and the function's tail appear truncated
// in this excerpt.
144 MachineFunction &MF = *MI.getParent()->getParent();
// Match the width of the call-target register: $gp for i32, $gp_64
// otherwise.
145 MVT::SimpleValueType Ty = getRegTy(MI.getOperand(0).getReg(), MF);
146 unsigned Reg = Ty == MVT::i32 ? Mips::GP : Mips::GP_64;
// Scan all operands for the chosen GP register.
148 for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
149 MachineOperand &MO = MI.getOperand(I);
150 if (MO.isReg() && MO.getReg() == Reg) {
// A freshly constructed MBBInfo is unvisited: no hash-table scope yet.
159 MBBInfo::MBBInfo(MachineDomTreeNode *N) : Node(N), HTScope(0) {}
161 const MachineDomTreeNode *MBBInfo::getNode() const { return Node; }
// Visited iff preVisit has allocated a scope (HTScope non-null).
163 bool MBBInfo::isVisited() const { return HTScope; }
// Open a new scope so entries inserted while visiting this subtree are
// discarded when the dominator-tree walk leaves it.
165 void MBBInfo::preVisit(ScopedHTType &ScopedHT) {
166 HTScope = new ScopedHTType::ScopeTy(ScopedHT);
// NOTE(review): postVisit's body (presumably deleting HTScope to close
// the scope) appears truncated in this excerpt.
169 void MBBInfo::postVisit() {
173 // OptimizePICCall methods.
174 bool OptimizePICCall::runOnMachineFunction(MachineFunction &F) {
// The pass does not run in Mips16 mode.
// NOTE(review): this guard's early return, the visited-node handling
// (postVisit + pop), and the final return of Changed appear truncated
// in this excerpt.
175 if (F.getTarget().getSubtarget<MipsSubtarget>().inMips16Mode())
178 // Do a pre-order traversal of the dominator tree.
179 MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
180 bool Changed = false;
// Iterative DFS seeded with the dominator-tree root.
182 SmallVector<MBBInfo, 8> WorkList(1, MBBInfo(MDT->getRootNode()));
184 while (!WorkList.empty()) {
185 MBBInfo &MBBI = WorkList.back();
187 // If this MBB has already been visited, destroy the scope for the MBB and
188 // pop it from the work list.
189 if (MBBI.isVisited()) {
195 // Visit the MBB and add its children to the work list.
196 MBBI.preVisit(ScopedHT);
197 Changed |= visitNode(MBBI);
198 const MachineDomTreeNode *Node = MBBI.getNode();
199 const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
200 WorkList.append(Children.begin(), Children.end());
// Process every instruction of MBBI's block, optimizing PIC calls.
// Returns true if any instruction was modified.
206 bool OptimizePICCall::visitNode(MBBInfo &MBBI) {
207 bool Changed = false;
208 MachineBasicBlock *MBB = MBBI.getNode()->getBlock();
210 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
// NOTE(review): the loop increment, the declarations of Reg and Entry,
// and several statements of the loop body appear truncated in this
// excerpt.
215 // Skip instructions that are not call instructions via registers.
216 if (!isCallViaRegister(*I, Reg, Entry))
// Number of dominating loads of this callee's address seen so far.
220 unsigned N = getCount(Entry);
223 // If a function has been called more than twice, we do not have to emit a
224 // load instruction to get the function address from the GOT, but can
225 // instead reuse the address that has been loaded before.
226 if (N >= 2 && !LoadTargetFromGOT)
227 getCallTargetRegOpnd(*I)->setReg(getReg(Entry));
229 // Erase the $gp operand if this isn't the first time a function has
230 // been called. $gp needs to be set up only if the function call can go
231 // through a lazy binding stub.
// Record this call's target register for reuse in dominated blocks.
236 incCntAndSetReg(Entry, Reg);
// Route the call's target through T9 (see setCallTargetReg above).
238 setCallTargetReg(MBB, I);
// Determine whether MI is a register-indirect call whose target address
// is loaded lazily from the GOT; on success Reg and Val receive the
// target register and underlying GOT-entry object.
244 bool OptimizePICCall::isCallViaRegister(MachineInstr &MI, unsigned &Reg,
245 const Value *&Val) const {
249 MachineOperand *MO = getCallTargetRegOpnd(MI);
251 // Return if MI is not a function call via a register.
255 // Get the instruction that loads the function address from the GOT.
// NOTE(review): the early returns, the assignment of Reg from MO, and
// the final return appear truncated in this excerpt.
258 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
259 MachineInstr *DefMI = MRI.getVRegDef(Reg);
263 // See if DefMI is an instruction that loads from a GOT entry that holds the
264 // address of a lazy binding stub.
265 if (!DefMI->mayLoad() || DefMI->getNumOperands() < 3)
268 unsigned Flags = DefMI->getOperand(2).getTargetFlags();
// Only MO_GOT_CALL and MO_CALL_LO16 relocations are accepted here.
270 if (Flags != MipsII::MO_GOT_CALL && Flags != MipsII::MO_CALL_LO16)
273 // Return the underlying object for the GOT entry in Val.
274 assert(DefMI->hasOneMemOperand());
275 Val = (*DefMI->memoperands_begin())->getValue();
// Number of dominating loads of Entry's address recorded so far; an
// entry absent from the scoped table yields a default (zero) pair.
279 unsigned OptimizePICCall::getCount(const Value *Entry) {
280 return ScopedHT.lookup(Entry).first;
// Register written by the most recent dominating load of Entry's address.
283 unsigned OptimizePICCall::getReg(const Value *Entry) {
284 unsigned Reg = ScopedHT.lookup(Entry).second;
// NOTE(review): the statements following the lookup (presumably an
// assert and the return of Reg) appear truncated in this excerpt.
// Increment Entry's dominating-load count and record Reg as the latest
// register holding its address. The insert shadows any outer-scope
// value only for the current dominator subtree.
289 void OptimizePICCall::incCntAndSetReg(const Value *Entry, unsigned Reg) {
290 CntRegP P = ScopedHT.lookup(Entry);
291 ScopedHT.insert(Entry, std::make_pair(P.first + 1, Reg));
294 /// Return an OptimizePICCall object.
295 FunctionPass *llvm::createMipsOptimizePICCallPass(MipsTargetMachine &TM) {
296 return new OptimizePICCall(TM);