//===-- AArch64AdvSIMDScalar.cpp - Replace i64 ops with AdvSIMD scalar ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// When profitable, replace GPR-targeting i64 instructions with their
// AdvSIMD scalar equivalents. Generally speaking, "profitable" is defined
// as minimizing the number of cross-class register copies.
//
//===----------------------------------------------------------------------===//
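//
// As an illustrative sketch (register numbers are arbitrary, not taken from a
// specific test case): an i64 add such as
//
//     add x0, x1, x2
//
// whose operands are produced by, and whose result is consumed by, FPR64
// values would otherwise require FMOV copies across the GPR/FPR register
// files. Rewriting it to the AdvSIMD scalar form
//
//     add d0, d1, d2
//
// lets those cross-class copies be deleted.
//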
//===----------------------------------------------------------------------===//
// TODO: Graph-based predicate heuristics.
// Walking the instruction list linearly will get many, perhaps most, of
// the cases, but to do a truly thorough job of this, we need a more
// holistic approach.
//
// This optimization is very similar in spirit to the register allocator's
// spill placement, only here we're determining where to place cross-class
// register copies rather than spills. As such, a similar approach is
// appropriate here.
//
// We want to build up a set of graphs of all instructions which are candidates
// for transformation along with instructions which generate their inputs and
// consume their outputs. For each edge in the graph, we assign a weight
// based on whether there is a copy required there (weight zero if not) and
// the block frequency of the block containing the defining or using
// instruction, whichever is less. Our optimization is then a graph problem
// to minimize the total weight of all the graphs, then transform instructions
// and add or remove copy instructions as called for to implement the
// desired solution.
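//
// As a sketch of the intended cost model (the numbers here are made up for
// illustration): if an edge connects a def in a block with frequency 8 to a
// use in a block with frequency 2, and realizing that edge would require a
// cross-class copy, the edge gets weight min(8, 2) = 2; if no copy would be
// needed, it gets weight 0.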
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
#define DEBUG_TYPE "aarch64-simd-scalar"

// Allow forcing all i64 operations with equivalent SIMD instructions to use
// them. For stress-testing the transformation function.
static cl::opt<bool>
TransformAll("aarch64-simd-scalar-force-all",
             cl::desc("Force use of AdvSIMD scalar instructions everywhere"),
             cl::init(false), cl::Hidden);

STATISTIC(NumScalarInsnsUsed, "Number of scalar instructions used");
STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted");
STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");
namespace {
class AArch64AdvSIMDScalar : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const AArch64InstrInfo *TII;

private:
  // isProfitableToTransform - Predicate function to determine whether an
  // instruction should be transformed to its equivalent AdvSIMD scalar
  // instruction. "add Xd, Xn, Xm" ==> "add Dd, Dn, Dm", for example.
  bool isProfitableToTransform(const MachineInstr *MI) const;

  // transformInstruction - Perform the transformation of an instruction
  // to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
  // to be the correct register class, minimizing cross-class copies.
  void transformInstruction(MachineInstr *MI);

  // processMachineBasicBlock - Main optimization loop.
  bool processMachineBasicBlock(MachineBasicBlock *MBB);

public:
  static char ID; // Pass identification, replacement for typeid.
  explicit AArch64AdvSIMDScalar() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &F) override;

  const char *getPassName() const override {
    return "AdvSIMD Scalar Operation Optimization";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
char AArch64AdvSIMDScalar::ID = 0;
} // end anonymous namespace
static bool isGPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  // A subregister reference can't be a plain GPR64.
  if (SubReg)
    return false;
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
  return AArch64::GPR64RegClass.contains(Reg);
}
static bool isFPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
            SubReg == 0) ||
           (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&
            SubReg == AArch64::dsub);
  // Physical register references just check the register class directly.
  return (AArch64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
         (AArch64::FPR128RegClass.contains(Reg) && SubReg == AArch64::dsub);
}
// getSrcFromCopy - Get the original source register for a GPR64 <--> FPR64
// copy instruction. Return zero_reg if the instruction is not a copy.
static unsigned getSrcFromCopy(const MachineInstr *MI,
                               const MachineRegisterInfo *MRI,
                               unsigned &SubReg) {
  SubReg = 0;
  // The "FMOV Xd, Dn" instruction is the typical form.
  if (MI->getOpcode() == AArch64::FMOVDXr ||
      MI->getOpcode() == AArch64::FMOVXDr)
    return MI->getOperand(1).getReg();
  // A lane zero extract "UMOV.d Xd, Vn[0]" is equivalent. We shouldn't see
  // these at this stage, but it's easy to check for.
  if (MI->getOpcode() == AArch64::UMOVvi64 && MI->getOperand(2).getImm() == 0) {
    SubReg = AArch64::dsub;
    return MI->getOperand(1).getReg();
  }
  // Or just a plain COPY instruction. This can be directly to/from FPR64,
  // or it can be a dsub subreg reference to an FPR128.
  if (MI->getOpcode() == AArch64::COPY) {
    if (isFPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
                MRI) &&
        isGPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(), MRI))
      return MI->getOperand(1).getReg();
    if (isGPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
                MRI) &&
        isFPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(),
                MRI)) {
      SubReg = MI->getOperand(1).getSubReg();
      return MI->getOperand(1).getReg();
    }
  }

  // Otherwise, this is some other kind of instruction.
  return 0;
}
// getTransformOpcode - For any opcode for which there is an AdvSIMD equivalent
// that we're considering transforming to, return that AdvSIMD opcode. For all
// others, return the original opcode.
static int getTransformOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    break;
  // FIXME: Lots more possibilities.
  case AArch64::ADDXrr:
    return AArch64::ADDv1i64;
  case AArch64::SUBXrr:
    return AArch64::SUBv1i64;
  }
  // No AdvSIMD equivalent, so just return the original opcode.
  return Opc;
}

static bool isTransformable(const MachineInstr *MI) {
  int Opc = MI->getOpcode();
  return Opc != getTransformOpcode(Opc);
}
// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Dn, Dm", for example.
bool
AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
  // If this instruction isn't eligible to be transformed (no SIMD equivalent),
  // early exit since that's the common case.
  if (!isTransformable(MI))
    return false;

  // Count the number of copies we'll need to add and approximate the number
  // of copies that a transform will enable us to remove.
  unsigned NumNewCopies = 3;
  unsigned NumRemovableCopies = 0;
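  // Note: the baseline of three new copies corresponds to the worst case in
  // transformInstruction below: one GPR-to-FPR copy for each of the two
  // source operands plus one FPR-to-GPR copy for the result.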
  unsigned OrigSrc0 = MI->getOperand(1).getReg();
  unsigned OrigSrc1 = MI->getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0;
  unsigned Src1 = 0, SubReg1;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If the source was from a copy, we don't need to insert a new copy.
    if (Src0)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that copy instruction entirely.
    if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0))
      ++NumRemovableCopies;
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    // If the source was from a copy, we don't need to insert a new copy.
    if (Src1)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that copy instruction entirely.
    if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1))
      ++NumRemovableCopies;
  }
  // If any of the uses of the original instruction is a cross-class copy,
  // that's a copy that will be removable if we transform. Likewise, if
  // any of the uses is a transformable instruction, it's likely the transforms
  // will chain, enabling us to save a copy there, too. This is an aggressive
  // heuristic that approximates the graph-based cost analysis described above.
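  // For example, if this ADDXrr feeds a SUBXrr that will itself be rewritten
  // to SUBv1i64, the GPR-to-FPR copy between the two can be avoided as well.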
  unsigned Dst = MI->getOperand(0).getReg();
  bool AllUsesAreCopies = true;
  for (MachineRegisterInfo::use_instr_nodbg_iterator
           Use = MRI->use_instr_nodbg_begin(Dst),
           E = MRI->use_instr_nodbg_end();
       Use != E; ++Use) {
    unsigned SubReg;
    if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(&*Use))
      ++NumRemovableCopies;
    // If the use is an INSERT_SUBREG, that's still something that can
    // directly use the FPR64, so we don't invalidate AllUsesAreCopies. It's
    // preferable to have it use the FPR64 in most cases, as if the source
    // vector is an IMPLICIT_DEF, the INSERT_SUBREG just goes away entirely.
    // Ditto for a lane insert.
    else if (Use->getOpcode() == AArch64::INSERT_SUBREG ||
             Use->getOpcode() == AArch64::INSvi64gpr)
      ;
    else
      AllUsesAreCopies = false;
  }
  // If all of the uses of the original destination register are copies to
  // FPR64, then we won't end up having a new copy back to GPR64 either.
  if (AllUsesAreCopies)
    --NumNewCopies;

  // If a transform will not increase the number of cross-class copies required,
  // it's profitable to do it.
  if (NumNewCopies <= NumRemovableCopies)
    return true;

  // Finally, even if we otherwise wouldn't transform, check if we're forcing
  // transformation of everything.
  return TransformAll;
}
static MachineInstr *insertCopy(const AArch64InstrInfo *TII, MachineInstr *MI,
                                unsigned Dst, unsigned Src, bool IsKill) {
  MachineInstrBuilder MIB =
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AArch64::COPY),
              Dst)
          .addReg(Src, getKillRegState(IsKill));
  DEBUG(dbgs() << "    adding copy: " << *MIB);
  ++NumCopiesInserted;
  return MIB;
}
// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
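// For example (an illustrative sketch in MIR-like notation with made-up
// virtual register numbers), a transformed
//     %vreg0 = ADDXrr %vreg1, %vreg2
// becomes
//     %vreg3 = COPY %vreg1        (GPR64 -> FPR64, if no FPR64 source exists)
//     %vreg4 = COPY %vreg2        (GPR64 -> FPR64, if no FPR64 source exists)
//     %vreg5 = ADDv1i64 %vreg3, %vreg4
//     %vreg0 = COPY %vreg5        (FPR64 -> GPR64)
// with the source copies omitted when the operands can be taken directly
// from existing FPR64 values.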
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
  DEBUG(dbgs() << "Scalar transform: " << *MI);

  MachineBasicBlock *MBB = MI->getParent();
  int OldOpc = MI->getOpcode();
  int NewOpc = getTransformOpcode(OldOpc);
  assert(OldOpc != NewOpc && "transform an instruction to itself?!");

  // Check if we need a copy for the source registers.
  unsigned OrigSrc0 = MI->getOperand(1).getReg();
  unsigned OrigSrc1 = MI->getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0;
  unsigned Src1 = 0, SubReg1;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If there are no other users of the original source, we can delete
    // that copy instruction entirely.
    if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0)) {
      assert(Src0 && "Can't delete copy w/o a valid original source!");
      Def->eraseFromParent();
      ++NumCopiesDeleted;
    }
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    // If there are no other users of the original source, we can delete
    // that copy instruction entirely.
    if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1)) {
      assert(Src1 && "Can't delete copy w/o a valid original source!");
      Def->eraseFromParent();
      ++NumCopiesDeleted;
    }
  }
  // If we weren't able to reference the original source directly, create a
  // copy.
  if (!Src0) {
    SubReg0 = 0;
    Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src0, OrigSrc0, true);
  }
  if (!Src1) {
    SubReg1 = 0;
    Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src1, OrigSrc1, true);
  }

  // Create a vreg for the destination.
  // FIXME: No need to do this if the ultimate user expects an FPR64.
  // Check for that and avoid the copy if possible.
  unsigned Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);

  // For now, all of the new instructions have the same simple three-register
  // form, so no need to special case based on what instruction we're
  // adding.
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(NewOpc), Dst)
      .addReg(Src0, getKillRegState(true), SubReg0)
      .addReg(Src1, getKillRegState(true), SubReg1);

  // Now copy the result back out to a GPR.
  // FIXME: Try to avoid this if all uses could actually just use the FPR64
  // directly.
  insertCopy(TII, MI, MI->getOperand(0).getReg(), Dst, true);

  // Erase the old instruction.
  MI->eraseFromParent();

  ++NumScalarInsnsUsed;
}
// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
  bool Changed = false;
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
    MachineInstr *MI = I;
    // Advance the iterator before transforming, since the transform may erase
    // MI.
    ++I;
    if (isProfitableToTransform(MI)) {
      transformInstruction(MI);
      Changed = true;
    }
  }
  return Changed;
}
// runOnMachineFunction - Pass entry point from PassManager.
bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
  bool Changed = false;
  DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");

  const TargetMachine &TM = mf.getTarget();
  MRI = &mf.getRegInfo();
  TII = static_cast<const AArch64InstrInfo *>(TM.getInstrInfo());

  // Just check things on a one-block-at-a-time basis.
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
    if (processMachineBasicBlock(I))
      Changed = true;
  return Changed;
}

// createAArch64AdvSIMDScalar - Factory function used by AArch64TargetMachine
// to add the pass to the PassManager.
FunctionPass *llvm::createAArch64AdvSIMDScalar() {
  return new AArch64AdvSIMDScalar();
}