//===-- AtomicExpandPass.cpp - Expand atomic instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or AtomicCmpXchg.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
    const TargetLowering *TLI;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool expandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
    bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    bool isIdempotentRMW(AtomicRMWInst *AI);
    bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  };
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
    "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
    false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}

bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");
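
    // If the target wants explicit fences rather than strong orderings on the
    // instruction itself, downgrade the instruction to monotonic and bracket
    // it with the fences the target asks for instead.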
    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
      MadeChange |= expandAtomicLoad(LI);
    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
      MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.
      MadeChange |=
          (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) ||
          (TLI->shouldExpandAtomicRMWInIR(RMWI) && expandAtomicRMW(RMWI));
    } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting Builder's insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
  if (TLI->hasLoadLinkedStoreConditional())
    return expandAtomicLoadToLL(LI);
  else
    return expandAtomicLoadToCmpXchg(LI);
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);
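
  // The expected and new values are both the dummy zero: if memory happens to
  // contain zero the cmpxchg harmlessly stores zero back, and otherwise it
  // fails; either way the extracted "loaded" field is the current value.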
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them with an
  // atomic swap, which can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only return true in
  // shouldExpandAtomicRMW in cases where this is required and possible.
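  //
  // For example, an oversized "store atomic i64 %v, i64* %p seq_cst" becomes
  // "atomicrmw xchg i64* %p, i64 %v seq_cst" whose result is unused, and the
  // swap is then lowered by expandAtomicRMW below.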
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return expandAtomicRMW(AI);
}

bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
  if (TLI->hasLoadLinkedStoreConditional())
    return expandAtomicRMWToLLSC(AI);
  else
    return expandAtomicRMWToCmpXchg(AI);
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
  AtomicOrdering MemOpOrder = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
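  // emitStoreConditional returns 0 on success (strex-style), so a nonzero
  // result means the exclusive reservation was lost and we must retry.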
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
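  // The atomicrmw may have been created from an unordered atomic store by
  // expandAtomicStore; cmpxchg does not accept unordered, so round that up
  // to monotonic.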
  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove the branch
  // entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment; getPrimitiveSizeInBits() is
  // in bits, so convert to bytes.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Value *NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Loaded->addIncoming(NewLoaded, LoopBB);

  Value *Success = Builder.CreateExtractValue(Pair, 1, "success");
  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-ops and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %cmpxchg.start/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
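  // A weak cmpxchg is allowed to fail spuriously, so when the
  // store-conditional fails we can branch straight to the failure block
  // instead of retrying the loop.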
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Set up the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now that we're no longer iterating through
  // them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return value that we don't understand
    // remains, so we have to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();

  return true;
}
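
// An atomicrmw is idempotent when its operation leaves the memory value
// unchanged (e.g. "atomicrmw add %p, 0" or "atomicrmw and %p, -1"); it is then
// equivalent, up to memory ordering, to an atomic load.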
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
      expandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}