//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient. This currently matches one
//   pair at a time, recomputes live intervals, and then moves on to the next
//   pair. It would be better to compute a list of all merges that need to
//   occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together to fit after adjusting the
//   base pointer, we can add to the base pointer and use the new, reduced
//   offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
private:
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  static bool offsetsCanBeCombined(unsigned Offset0,
                                   unsigned Offset1,
                                   unsigned Size);

  MachineBasicBlock::iterator findMatchingDSInst(MachineBasicBlock::iterator I,
                                                 unsigned EltSize);

  void updateRegDefsUses(unsigned SrcReg,
                         unsigned DstReg,
                         unsigned SubIdx);

  MachineBasicBlock::iterator mergeRead2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize);

  MachineBasicBlock::iterator mergeWrite2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize);

public:
  static char ID;

  SILoadStoreOptimizer()
    : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
      LIS(nullptr) {}

  SILoadStoreOptimizer(const TargetMachine &TM_) : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Load / Store Optimizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.addPreserved<LiveVariables>();
    AU.addRequired<LiveIntervals>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
  return new SILoadStoreOptimizer(TM);
}
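
// Returns true if two DS offsets (in bytes) can be packed into the two 8-bit
// offset fields of a single read2/write2 instruction: both must be multiples
// of Size, and the resulting element offsets must fit in 8 bits either
// directly or through the stride-64 (st64) instruction variants.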
bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,
                                                unsigned Offset1,
                                                unsigned Size) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be necessary?
  if (Offset0 == Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((Offset0 % Size != 0) || (Offset1 % Size != 0))
    return false;

  unsigned EltOffset0 = Offset0 / Size;
  unsigned EltOffset1 = Offset1 / Size;

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1))
    return true;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 != 0) || (EltOffset1 % 64 != 0))
    return false;

  return isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64);
}
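
// Looks at the instruction immediately following I and returns it if it is a
// mergeable DS instruction: same opcode, no ordered memory reference, same
// base address register and subregister, and offsets accepted by
// offsetsCanBeCombined(). Returns the block's end iterator if there is no
// match.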
MachineBasicBlock::iterator
SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
                                         unsigned EltSize) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  ++MBBI;

  // Don't dereference the block's end iterator if I was the last instruction.
  if (MBBI == E)
    return E;

  if (MBBI->getOpcode() != I->getOpcode())
    return E;

  // Don't merge volatiles.
  if (MBBI->hasOrderedMemoryRef())
    return E;

  int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);
  const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
  const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

  // Check same base pointer. Be careful of subregisters, which can occur with
  // vectors of pointers.
  if (AddrReg0.getReg() == AddrReg1.getReg() &&
      AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
    int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
                                               AMDGPU::OpName::offset);
    unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
    unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;

    // Check both offsets fit in the reduced range.
    if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
      return MBBI;
  }

  return E;
}
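
// Rewrites every operand referring to SrcReg so it instead refers to
// subregister SubIdx of DstReg, composing with any subregister index already
// present on the operand.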
void SILoadStoreOptimizer::updateRegDefsUses(unsigned SrcReg,
                                             unsigned DstReg,
                                             unsigned SubIdx) {
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg),
         E = MRI->reg_end(); I != E; ) {
    MachineOperand &O = *I;
    // Advance the iterator before rewriting the operand, since the rewrite
    // removes O from SrcReg's use list.
    ++I;
    O.substVirtReg(DstReg, SubIdx, *TRI);
  }
}
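
// Fuses I and Paired into a single ds_read2 (or ds_read2st64) that writes a
// new super-register, then copies the two halves back to the original
// destination registers. For example, with EltSize == 4:
//   %v0 = DS_READ_B32 %addr, offset:16
//   %v1 = DS_READ_B32 %addr, offset:32
// becomes
//   %v2 = DS_READ2_B32 %addr, offset0:4, offset1:8
//   %v0 = COPY %v2:sub0
//   %v1 = COPY %v2:sub1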
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize) {
  MachineBasicBlock *MBB = I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);

  const MachineOperand *Dest0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst);
  const MachineOperand *Dest1 = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst);

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in the
  // non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = I->getDebugLoc();
  MachineInstrBuilder Read2
    = BuildMI(*MBB, I, DL, Read2Desc, DestReg)
      .addOperand(*AddrReg) // addr
      .addImm(NewOffset0) // offset0
      .addImm(NewOffset1) // offset1
      .addImm(0) // gds
      .addMemOperand(*I->memoperands_begin())
      .addMemOperand(*Paired->memoperands_begin());

  unsigned SubRegIdx0 = (EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  MachineInstr *Copy0 = BuildMI(*MBB, I, DL, CopyDesc)
    .addOperand(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, I, DL, CopyDesc)
    .addOperand(*Dest1)
    .addReg(DestReg, RegState::Kill, SubRegIdx1);

  LIS->InsertMachineInstrInMaps(Read2);

  // repairIntervalsInRange() doesn't handle physical registers, so we have
  // to update the M0 range manually.
  SlotIndex PairedIndex = LIS->getInstructionIndex(Paired);
  LiveRange &M0Range = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::M0, TRI));
  LiveRange::Segment *M0Segment = M0Range.getSegmentContaining(PairedIndex);
  bool UpdateM0Range = M0Segment->end == PairedIndex.getRegSlot();

  // The new write to the original destination register is now the copy. Steal
  // the old SlotIndex.
  LIS->ReplaceMachineInstrInMaps(I, Copy0);
  LIS->ReplaceMachineInstrInMaps(Paired, Copy1);

  I->eraseFromParent();
  Paired->eraseFromParent();

  LiveInterval &AddrRegLI = LIS->getInterval(AddrReg->getReg());
  LIS->shrinkToUses(&AddrRegLI);

  LIS->createAndComputeVirtRegInterval(DestReg);

  if (UpdateM0Range) {
    SlotIndex Read2Index = LIS->getInstructionIndex(Read2);
    M0Segment->end = Read2Index.getRegSlot();
  }

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Read2.getInstr();
}
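
// Fuses the two single-component DS writes I and Paired into one ds_write2
// (or ds_write2st64) that stores both data operands with packed offsets, e.g.
// with EltSize == 4:
//   DS_WRITE_B32 %addr, %v0, offset:16
//   DS_WRITE_B32 %addr, %v1, offset:32
// becomes
//   DS_WRITE2_B32 %addr, %v0, %v1, offset0:4, offset1:8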
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize) {
  MachineBasicBlock *MBB = I->getParent();

  // Be sure to use .addOperand(), and not .addReg() with these. We want to be
  // sure we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in the
  // non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = I->getDebugLoc();

  // repairIntervalsInRange() doesn't handle physical registers, so we have
  // to update the M0 range manually.
  SlotIndex PairedIndex = LIS->getInstructionIndex(Paired);
  LiveRange &M0Range = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::M0, TRI));
  LiveRange::Segment *M0Segment = M0Range.getSegmentContaining(PairedIndex);
  bool UpdateM0Range = M0Segment->end == PairedIndex.getRegSlot();

  MachineInstrBuilder Write2
    = BuildMI(*MBB, I, DL, Write2Desc)
      .addOperand(*Addr) // addr
      .addOperand(*Data0) // data0
      .addOperand(*Data1) // data1
      .addImm(NewOffset0) // offset0
      .addImm(NewOffset1) // offset1
      .addImm(0) // gds
      .addMemOperand(*I->memoperands_begin())
      .addMemOperand(*Paired->memoperands_begin());

  // XXX - How do we express subregisters here?
  unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg() };

  LIS->RemoveMachineInstrFromMaps(I);
  LIS->RemoveMachineInstrFromMaps(Paired);
  I->eraseFromParent();
  Paired->eraseFromParent();

  // This doesn't handle physical registers like M0.
  LIS->repairIntervalsInRange(MBB, Write2, Write2, OrigRegs);

  if (UpdateM0Range) {
    SlotIndex Write2Index = LIS->getInstructionIndex(Write2);
    M0Segment->end = Write2Index.getRegSlot();
  }

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Write2.getInstr();
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      unsigned Size = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
      if (Match != E) {
        Modified = true;
        I = mergeRead2Pair(I, Match, Size);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      unsigned Size = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
      if (Match != E) {
        Modified = true;
        I = mergeWrite2Pair(I, Match, Size);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  const TargetSubtargetInfo &STM = MF.getSubtarget();
  TRI = static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
  TII = static_cast<const SIInstrInfo *>(STM.getInstrInfo());
  MRI = &MF.getRegInfo();

  LIS = &getAnalysis<LiveIntervals>();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  assert(!MRI->isSSA());

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}