//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or
// AtomicCmpXchg.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
class AtomicExpand : public FunctionPass {
  const TargetMachine *TM;
  const TargetLowering *TLI;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                             bool IsStore, bool IsLoad);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  bool expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  bool expandAtomicOpToLLSC(
      Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
      std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *AI);
  bool simplifyIdempotentRMW(AtomicRMWInst *AI);
};
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
                   "Expand Atomic calls in terms of either load-linked & "
                   "store-conditional or cmpxchg",
                   false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}

bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather
  // a list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI) {
      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
      MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting Builder insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(
        LI, LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
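/// For example, atomicrmw max is emitted as an icmp sgt between the loaded
/// value and the increment, followed by a select of the larger of the two.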
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(AI, AI->getPointerOperand(), AI->getOrdering(),
                                [&](IRBuilder<> &Builder, Value *Loaded) {
                                  return performAtomicOp(AI->getOperation(),
                                                         Builder, Loaded,
                                                         AI->getValOperand());
                                });
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}

bool AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
    std::function<Value *(IRBuilder<> &, Value *)> PerformOp) {
  BasicBlock *BB = I->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(I->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from I.
  IRBuilder<> Builder(I);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
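  // The loop body is: load-linked, apply the operation in registers, then
  // store-conditional. A non-zero store-conditional result means the exclusive
  // reservation was lost, so we branch back to the start and retry.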
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.nostore
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
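  // The loop body load-links the current value and compares it against the
  // expected operand; on a mismatch we branch to cmpxchg.nostore so the
  // target can balance out the load-linked before taking the failure path.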
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
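    // Rebuild the { iN, i1 } pair with insertvalue, feeding in the loaded
    // value and the success PHI computed above, so the reconstructed result
    // matches what the original cmpxchg returned.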
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}

bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  assert(AI);

  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, MemOpOrder,
                Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}