//===- AtomicExpandPass.cpp - Expand atomic instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target specific instructions which implement
// the same semantics in a way which better fits the target backend. This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {

class AtomicExpand : public FunctionPass {
  const TargetLowering *TLI = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  AtomicExpand() : FunctionPass(ID) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  Value *
  insertRMWLLSCLoop(IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
                    Align AddrAlign, AtomicOrdering MemOpOrder,
                    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void expandAtomicOpToLLSC(
      Instruction *I, Type *ResultTy, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
  bool expandPartwordCmpXchg(AtomicCmpXchgInst *I);
  void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);

  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  static Value *insertRMWCmpXchgLoop(
      IRBuilder<> &Builder, Type *ResultType, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
      CreateCmpXchgInstFun CreateCmpXchg);
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);

  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *RMWI);
  bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);

  bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, Align Alignment,
                               Value *PointerOperand, Value *ValueOperand,
                               Value *CASExpected, AtomicOrdering Ordering,
                               AtomicOrdering Ordering2,
                               ArrayRef<RTLIB::Libcall> Libcalls);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
  void expandAtomicRMWToLibcall(AtomicRMWInst *I);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

  friend bool
  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                 CreateCmpXchgInstFun CreateCmpXchg);
};

} // end anonymous namespace

char AtomicExpand::ID = 0;

char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
                false)

FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }

// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering. (Versus turning into a __atomic libcall)
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
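
// For example, on a target whose getMaxAtomicSizeInBitsSupported() is 64, an
// atomic i128 operation (or any atomic whose alignment is less than its size)
// fails this check and is routed to the generic __atomic_* libcalls instead
// of target lowering.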
bool AtomicExpand::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<TargetMachine>();
  if (!TM.getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM.getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather
  // a list of all atomic instructions before we start.
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;
    if (I->isAtomic() && !isa<FenceInst>(I))
      AtomicInsts.push_back(I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the Size/Alignment is not supported, replace with a libcall.
    if (LI) {
      if (!atomicSizeSupported(TLI, LI)) {
        expandAtomicLoadToLibcall(LI);
        MadeChange = true;
        continue;
      }
    } else if (SI) {
      if (!atomicSizeSupported(TLI, SI)) {
        expandAtomicStoreToLibcall(SI);
        MadeChange = true;
        continue;
      }
    } else if (RMWI) {
      if (!atomicSizeSupported(TLI, RMWI)) {
        expandAtomicRMWToLibcall(RMWI);
        MadeChange = true;
        continue;
      }
    } else if (CASI) {
      if (!atomicSizeSupported(TLI, CASI)) {
        expandAtomicCASToLibcall(CASI);
        MadeChange = true;
        continue;
      }
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
      } else if (CASI &&
                 TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                     TargetLoweringBase::AtomicExpansionKind::None &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
      }

      if (FenceOrdering != AtomicOrdering::Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering);
      }
    }
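
    // For illustration, the floating-point conversions below rewrite, e.g.,
    //   %v = load atomic float, float* %p seq_cst, align 4
    // into an atomic i32 load through a bitcast pointer, followed by a
    // bitcast of the loaded value back to float (stores are handled the same
    // way).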
    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // we try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
        unsigned ValueSize = getAtomicOpSize(RMWI);
        AtomicRMWInst::BinOp Op = RMWI->getOperation();
        if (ValueSize < MinCASSize &&
            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
             Op == AtomicRMWInst::And)) {
          RMWI = widenPartwordAtomicRMW(RMWI);
          MadeChange = true;
        }

        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToIntegerType for floating point too.
      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
             "unimplemented - floating point not legal at IR level");
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        CASI = convertCmpXchgToIntegerType(CASI);
        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}

/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getMemValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}
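
// For illustration, getCorrespondingIntegerType maps float -> i32 and
// double -> i64; a pointer type maps to the integer type with the same store
// size (e.g., i64 for a 64-bit pointer).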
/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}

bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
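
// For illustration, expandAtomicLoadToCmpXchg above rewrites
//   %v = load atomic i32, i32* %p seq_cst, align 4
// into roughly:
//   %pair = cmpxchg i32* %p, i32 0, i32 0 seq_cst seq_cst
//   %v = extractvalue { i32, i1 } %pair, 0
// Exchanging 0 for 0 never changes the stored value; the cmpxchg is used only
// to read it atomically.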
/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, Value *&Success,
                                 Value *&NewLoaded) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP types.
  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    unsigned AS = Addr->getType()->getPointerAddressSpace();
    Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}
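
// For illustration, each loop iteration built by createCmpXchgInstFun looks
// roughly like:
//   %pair = cmpxchg iN* %addr, iN %loaded, iN %new ordering failure_ordering
//   %success = extractvalue { iN, i1 } %pair, 1
//   %newloaded = extractvalue { iN, i1 } %pair, 0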
/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::FAdd:
    return Builder.CreateFAdd(Loaded, Inc, "new");
  case AtomicRMWInst::FSub:
    return Builder.CreateFSub(Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      auto PerformOp = [&](IRBuilder<> &Builder, Value *Loaded) {
        return performAtomicOp(AI->getOperation(), Builder, Loaded,
                               AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getAlign(), AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      // TODO: Handle atomicrmw fadd/fsub
      if (AI->getType()->isFloatingPointTy())
        return false;

      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}
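
// For illustration, performAtomicOp above expands "atomicrmw max" into:
//   %cmp = icmp sgt iN %loaded, %inc
//   %new = select i1 %cmp, iN %loaded, iN %inc
// and the LL/SC or cmpxchg loop then splices this straight-line IR into its
// retry loop.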
namespace {

struct PartwordMaskValues {
  // These three fields are guaranteed to be set by createMaskInstrs.
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  // The remaining fields can be null.
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}

} // end anonymous namespace

/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of size
/// WordSize.
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignedAddr for it to have the same value as if
///           ValueType was loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignedAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
                                           Type *ValueType, Value *Addr,
                                           Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = ValueType;
  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    return PMV;
  }

  assert(ValueSize < MinWordSize);

  Type *WordPtrType =
      PMV.WordType->getPointerTo(Addr->getType()->getPointerAddressSpace());

  // TODO: we could skip some of this if AddrAlign >= MinWordSize.
  Value *AddrInt = Builder.CreatePtrToInt(Addr, DL.getIntPtrType(Ctx));
  PMV.AlignedAddr = Builder.CreateIntToPtr(
      Builder.CreateAnd(AddrInt, ~(uint64_t)(MinWordSize - 1)), WordPtrType,
      "AlignedAddr");
  PMV.AlignedAddrAlignment = Align(MinWordSize);

  Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  if (DL.isLittleEndian()) {
    // turn bytes into bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");
  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
  return PMV;
}
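
// Worked example for createMaskInstrs, assuming a little-endian target with a
// 32-bit minimum cmpxchg width: for an i8 at an address with Addr % 4 == 1,
// we get AlignedAddr = Addr & ~3, ShiftAmt = 8, Mask = 0x0000FF00, and
// Inv_Mask = 0xFFFF00FF.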
static Value *extractMaskedValue(IRBuilder<> &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.ValueType, "extracted");
  return Trunc;
}

static Value *insertMaskedValue(IRBuilder<> &Builder, Value *WideWord,
                                Value *Updated, const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  Value *Or = Builder.CreateOr(And, Shift, "inserted");
  return Or;
}

/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilder<> &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  // TODO: update to use
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
  // to merge bits from two values without requiring PMV.Inv_Mask.
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::And:
    llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = performAtomicOp(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin: {
    // Finally, comparison ops will operate on the full value, so
    // truncate down to the original size, and expand out again after
    // doing the operation.
    Value *Loaded_Extract = extractMaskedValue(Builder, Loaded, PMV);
    Value *NewVal = performAtomicOp(Op, Builder, Loaded_Extract, Inc);
    Value *FinalVal = insertMaskedValue(Builder, Loaded, NewVal, PMV);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop may operate upon only a
/// part of the value.
void AtomicExpand::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  AtomicOrdering MemOpOrder = AI->getOrdering();

  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  auto PerformPartwordOp = [&](IRBuilder<> &Builder, Value *Loaded) {
    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
                                 ValOperand_Shifted, AI->getValOperand(), PMV);
  };

  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                     PMV.AlignedAddrAlignment, MemOpOrder,
                                     PerformPartwordOp, createCmpXchgInstFun);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI =
      Builder.CreateAtomicRMW(Op, PMV.AlignedAddr, NewOperand,
                              PMV.AlignedAddrAlignment, AI->getOrdering());

  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}
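
// For illustration (little-endian, 32-bit words), widenPartwordAtomicRMW turns
//   %old = atomicrmw or i8* %p, i8 %v monotonic
// into roughly:
//   %old.w = atomicrmw or i32* %AlignedAddr, i32 %ValOperand_Shifted monotonic
//   %shifted = lshr i32 %old.w, %ShiftAmt
//   %old = trunc i32 %shifted to i8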
bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we
  // need to add a retry-loop for strong cmpxchg, so that
  // modifications to other parts of the word don't cause a spurious
  // failure.

  // This generates code like the following:
  //     [[Setup mask values PMV.*]]
  //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
  //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
  //     %InitLoaded = load i32* %addr
  //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
  //     br partword.cmpxchg.loop
  // partword.cmpxchg.loop:
  //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
  //        [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
  //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
  //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
  //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
  //        i32 %FullWord_NewVal success_ordering failure_ordering
  //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
  //     %Success = extractvalue { i32, i1 } %NewCI, 1
  //     br i1 %Success, label %partword.cmpxchg.end,
  //        label %partword.cmpxchg.failure
  // partword.cmpxchg.failure:
  //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
  //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
  //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
  //        label %partword.cmpxchg.end
  // partword.cmpxchg.end:
  //     %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
  //     %FinalOldVal = trunc i32 %tmp1 to i8
  //     %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
  //     %Res = insertvalue { i8, i1 } %tmp2, i1 %Success, 1

  Value *Addr = CI->getPointerOperand();
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  IRBuilder<> Builder(CI);
  LLVMContext &Ctx = Builder.getContext();

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully" added a branch at the end of BB
  // (to the wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask into place the expected and new
  // values
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  InitLoaded->setVolatile(CI->isVolatile());
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  // When we're building a strong cmpxchg, we need a loop, so you
  // might think we could use a weak cmpxchg inside. But, using strong
  // allows the below comparison for ShouldContinue, and we're
  // expecting the underlying cmpxchg to be a machine instruction,
  // which is strong anyways.
  NewCI->setWeak(CI->isWeak());

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);

  if (CI->isWeak())
    Builder.CreateBr(EndBB);
  else
    Builder.CreateCondBr(Success, EndBB, FailureBB);

  // partword.cmpxchg.failure:
  Builder.SetInsertPoint(FailureBB);
  // Upon failure, check whether the masked-out part of the loaded value has
  // been modified. If it has, retry with the updated word; if it hasn't, the
  // compared (masked-in) part must have mismatched, so the cmpxchg has
  // genuinely failed.
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
  Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);

  // Add the second value to the phi from above
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // partword.cmpxchg.end:
  Builder.SetInsertPoint(CI);

  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return true;
}

void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  IRBuilder<> Builder(I);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}
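
// Note: the masked-intrinsic expansions below hand the whole retry loop to a
// target-specific intrinsic instead of emitting generic IR; RISC-V, for
// example, takes this path for part-word atomics.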
void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used. Otherwise, just
  // zero-ext.
  Instruction::CastOps CastOp = Instruction::ZExt;
  AtomicRMWInst::BinOp RMWOp = AI->getOperation();
  if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
  IRBuilder<> Builder(CI);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getSuccessOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}

Value *AtomicExpand::insertRMWLLSCLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  assert(AddrAlign >=
             F->getParent()->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}

/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
AtomicCmpXchgInst *
AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(CI);

  Value *Addr = CI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(
      NewAddr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}
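
// For illustration, on a 64-bit target convertCmpXchgToIntegerType rewrites a
// cmpxchg of i8* values into roughly:
//   %cmp.i = ptrtoint i8* %cmp to i64
//   %new.i = ptrtoint i8* %new to i64
//   %pair = cmpxchg i64* %addr.i, i64 %cmp.i, i64 %new.i seq_cst seq_cst
// with the loaded i64 converted back to i8* via inttoptr.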
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, then the target does not
  // want to deal with memory orders, and emitLeading/TrailingFence should take
  // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder =
      ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder;

  // In implementations which use a barrier to achieve release semantics, we
  // can delay emitting this barrier until we know a store is actually going
  // to be attempted. The cost of this delay is that we need 2 copies of the
  // block emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->hasMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     %aligned.addr = ...
  // cmpxchg.start:
  //     %unreleasedload = @load.linked(%aligned.addr)
  //     %unreleasedload.extract = extract value from %unreleasedload
  //     %should_store = icmp eq %unreleasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.releasingstore,
  //                          label %cmpxchg.nostore
  // cmpxchg.releasingstore:
  //     fence?
  //     br label cmpxchg.trystore
  // cmpxchg.trystore:
  //     %loaded.trystore = phi [%unreleasedload, %cmpxchg.releasingstore],
  //                            [%releasedload, %cmpxchg.releasedload]
  //     %updated.new = insert %new into %loaded.trystore
  //     %stored = @store_conditional(%updated.new, %aligned.addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.releasedload/%cmpxchg.failure
  // cmpxchg.releasedload:
  //     %releasedload = @load.linked(%aligned.addr)
  //     %releasedload.extract = extract value from %releasedload
  //     %should_store = icmp eq %releasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
  //                           [%releasedload,
  //                               %cmpxchg.releasedload/%cmpxchg.trystore]
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %loaded.exit = phi [%loaded.nostore, %cmpxchg.failure],
  //                        [%loaded.trystore, %cmpxchg.trystore]
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %loaded = extract value from %loaded.exit
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewValueInsert, PMV.AlignedAddr,
                                MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad = TLI->emitLoadLinked(Builder, PMV.AlignedAddr, MemOpOrder);
    Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
    ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
                                       CI->getCompareOperand(), "should_store");

    // If the cmpxchg doesn't actually need any ordering when it fails, we can
    // jump straight past that fence instruction (if it exists).
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
    // Update PHI node in TryStoreBB.
    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // This is the "exit value" from the cmpxchg expansion. It may be of
  // a type wider than the one in the cmpxchg instruction.
  Value *LoadedFull = LoadedExit;

  Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
  Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
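
// For illustration, an idempotent RMW is one that leaves memory unchanged,
// e.g.:
//   %old = atomicrmw add i32* %p, i32 0 seq_cst
// The lowerIdempotentRMWIntoFencedLoad hook may turn this into a fence plus
// an atomic load, which is usually cheaper than a full read-modify-write.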
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicCmpXchg(CI);
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  }
}

// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  IRBuilder<> Builder(AI);
  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(),
      [&](IRBuilder<> &Builder, Value *Loaded) {
        return performAtomicOp(AI->getOperation(), Builder, Loaded,
                               AI->getValOperand());
      },
      CreateCmpXchg);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}

// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist in C on the target (otherwise, the
// function wouldn't actually be defined.)
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // TODO: "LargestSize" is an approximation for "largest type that
  // you can express in C". It seems to be the case that int128 is
  // supported on all 64-bit platforms, otherwise only up to 64-bit
  // integers are supported. If we get this wrong, then we'll try to
  // call a sized libcall that doesn't actually exist. There should
  // really be some more reliable way in LLVM of determining integer
  // sizes which are valid in the target's C ABI...
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}
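
// For illustration of the rules above (a sketch, assuming a typical 64-bit
// target where LargestSize is 16):
//   Size = 4, Align >= 4 -> sized call usable (e.g. __atomic_fetch_add_4)
//   Size = 4, Align == 2 -> underaligned; only a generic call is possible
//   Size = 3             -> not a specialized size; generic call only
//   Size = 16 when the largest legal integer is below 64 bits -> generic only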

void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
}

void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
}

void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for CAS");
}
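
// Every 6-entry Libcalls table (both here and in the expanders above) uses
// the same layout: index 0 holds the generic, size-independent variant, or
// UNKNOWN_LIBCALL when no generic form exists (there is no generic
// __atomic_fetch_* call, as the tables below reflect), and indices 1-5 hold
// the variants specialized for operand sizes of 1, 2, 4, 8, and 16 bytes.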
static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::BAD_BINOP:
    llvm_unreachable("Should not have BAD_BINOP.");
  case AtomicRMWInst::Xchg:
    return makeArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return makeArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return makeArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return makeArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return makeArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return makeArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return makeArrayRef(LibcallsNand);
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
    // No atomic libcalls are available for max/min/umax/umin or for the
    // floating-point operations.
    return {};
  }
  llvm_unreachable("Unexpected AtomicRMW operation.");
}

void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());

  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for
  // the operation (min/max and the fp operations), or there were only
  // size-specialized libcalls (add/sub/etc) and we needed a generic one.
  // So, expand to a CAS libcall, via a CAS loop, instead.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this](IRBuilder<> &Builder, Value *Addr, Value *Loaded,
                  Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                  Value *&Success, Value *&NewLoaded) {
          // Create the CAS instruction normally...
          AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
              Addr, Loaded, NewVal, Alignment, MemOpOrder,
              AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
          Success = Builder.CreateExtractValue(Pair, 1, "success");
          NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

          // ...and then expand the CAS into a libcall.
          expandAtomicCASToLibcall(Pair);
        });
  }
}

// A helper routine for the above expandAtomic*ToLibcall functions.
//
// 'Libcalls' contains an array of enum values for the particular
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller. Depending on the particular call, some will be null.
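//
// As a concrete (illustrative) example of the sized path: with a 4-byte,
// naturally aligned operand,
//   %old = atomicrmw xchg i32* %p, i32 %v seq_cst
// becomes, roughly,
//   %old = call i32 @__atomic_exchange_4(i8* %ptr, i32 %v, i32 5)
// where %ptr is the pointer cast to i8* and 5 is the C ABI encoding of
// seq_cst ordering.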
bool AtomicExpand::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32. So
  // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
  ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);

  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1: RTLibType = Libcalls[1]; break;
    case 2: RTLibType = Libcalls[2]; break;
    case 4: RTLibType = Libcalls[3]; break;
    case 8: RTLibType = Libcalls[4]; break;
    case 16: RTLibType = Libcalls[5]; break;
    default:
      // canUseSizedAtomicCall only accepts the sizes above; this keeps
      // RTLibType from being read uninitialized if that ever changes.
      llvm_unreachable("unexpected atomic operation size");
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this
    // operation, so give up.
    return false;
  }

  if (!TLI->getLibcallName(RTLibType)) {
    // This target does not implement the requested atomic libcall, so give up.
    return false;
  }

  // Build up the function call. There are two kinds. First, the sized
  // variants. These calls are going to be one of the following (with
  // N=1,2,4,8,16):
  //  iN    __atomic_load_N(iN *ptr, int ordering)
  //  void  __atomic_store_N(iN *ptr, iN val, int ordering)
  //  iN    __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
  //  bool  __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
  //                                    int success_order, int failure_order)
  //
  // Note that these functions can be used for non-integer atomic
  // operations; the values just need to be bitcast to integers on the
  // way in and out.
  //
  // And, then, the generic variants. They look like the following:
  //  void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
  //  void __atomic_store(size_t size, void *ptr, void *val, int ordering)
  //  void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
  //                         int ordering)
  //  bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
  //                                 void *desired, int success_order,
  //                                 int failure_order)
  //
  // The different signatures are built up depending on the
  // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
  // variables.
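  //
  // To summarize the temporaries built below (a description of the existing
  // logic, not new behavior): 'expected' always goes through a stack slot;
  // 'val' does too, but only for the generic calls (the sized calls pass it
  // directly as an integer); and a separate 'ret' slot is needed only for
  // generic, non-CAS calls that produce a result (a CAS instead reloads the
  // loaded value from the 'expected' slot).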

  AllocaInst *AllocaCASExpected = nullptr;
  Value *AllocaCASExpected_i8 = nullptr;
  AllocaInst *AllocaValue = nullptr;
  Value *AllocaValue_i8 = nullptr;
  AllocaInst *AllocaResult = nullptr;
  Value *AllocaResult_i8 = nullptr;

  Type *ResultTy;
  SmallVector<Value *, 6> Args;
  AttributeList Attr;

  // 'size' argument.
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equivalent to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  // note: This assumes all address spaces share a common libfunc
  // implementation and that addresses are convertible.  For systems without
  // that property, we'd need to extend this mechanism to support AS-specific
  // families of atomic intrinsics.
  auto PtrTypeAS = PointerOperand->getType()->getPointerAddressSpace();
  Value *PtrVal = Builder.CreateBitCast(PointerOperand,
                                        Type::getInt8PtrTy(Ctx, PtrTypeAS));
  PtrVal = Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
  Args.push_back(PtrVal);

  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();

    AllocaCASExpected_i8 = Builder.CreateBitCast(
        AllocaCASExpected, Type::getInt8PtrTy(Ctx, AllocaAS));
    Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
    Args.push_back(AllocaCASExpected_i8);
  }

  // 'val' argument ('desired' for cas), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      AllocaValue_i8 =
          Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
      Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue_i8);
    }
  }

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
    AllocaResult_i8 =
        Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
    Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
    Args.push_back(AllocaResult_i8);
  }

  // 'ordering' ('success_order' for cas) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // Now, the return type.
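  // (Illustrative summary of the cases below: a CAS returns the i1 success
  // flag, marked zeroext; a sized call with a result returns iN directly;
  // everything else is void, with any result written through the 'ret'
  // pointer set up above.)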
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addAttribute(Ctx, AttributeList::ReturnIndex, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn =
      M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;

  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool result
    // from call}
    Type *FinalResultTy = I->getType();
    Value *V = UndefValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
                                    AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}