//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target-specific instructions which implement
// the same semantics in a way which better fits the target backend. This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {

class AtomicExpand : public FunctionPass {
  const TargetLowering *TLI = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  AtomicExpand() : FunctionPass(ID) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  Value *
  insertRMWLLSCLoop(IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
                    AtomicOrdering MemOpOrder,
                    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void expandAtomicOpToLLSC(
      Instruction *I, Type *ResultTy, Value *Addr, AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
  void expandPartwordCmpXchg(AtomicCmpXchgInst *I);
  void
  expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);

  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  static Value *insertRMWCmpXchgLoop(
      IRBuilder<> &Builder, Type *ResultType, Value *Addr,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
      CreateCmpXchgInstFun CreateCmpXchg);
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);

  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *RMWI);
  bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);

  bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, unsigned Align,
                               Value *PointerOperand, Value *ValueOperand,
                               Value *CASExpected, AtomicOrdering Ordering,
                               AtomicOrdering Ordering2,
                               ArrayRef<RTLIB::Libcall> Libcalls);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *SI);
  void expandAtomicRMWToLibcall(AtomicRMWInst *I);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

  friend bool
  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                 CreateCmpXchgInstFun CreateCmpXchg);
};

} // end anonymous namespace

char AtomicExpand::ID = 0;

char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
                false)

FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }

// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

// Helper functions to retrieve the alignment of atomic instructions.
static unsigned getAtomicOpAlign(LoadInst *LI) {
  unsigned Align = LI->getAlignment();
  // In the future, if this IR restriction is relaxed, we should
  // return DataLayout::getABITypeAlignment when there's no align
  // value.
  assert(Align != 0 && "An atomic LoadInst always has an explicit alignment");
  return Align;
}

static unsigned getAtomicOpAlign(StoreInst *SI) {
  unsigned Align = SI->getAlignment();
  // In the future, if this IR restriction is relaxed, we should
  // return DataLayout::getABITypeAlignment when there's no align
  // value.
  assert(Align != 0 && "An atomic StoreInst always has an explicit alignment");
  return Align;
}

static unsigned getAtomicOpAlign(AtomicRMWInst *RMWI) {
  // TODO(PR27168): This instruction has no alignment attribute, but unlike
  // the default alignment for load/store, the default here is to assume it
  // has NATURAL alignment, not DataLayout-specified alignment.
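  // For example, an atomicrmw on an i64 is assumed to be 8-byte aligned even
  // if the DataLayout gives i64 a smaller ABI alignment: the store size of
  // the value type is used as that natural alignment below.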
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
  // TODO(PR27168): same comment as above.
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering. (Versus turning into a __atomic libcall)
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);
  return Align >= Size && Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}

bool AtomicExpand::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<TargetMachine>();
  if (!TM.getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM.getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather
  // a list of all atomic instructions before we start.
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;
    if (I->isAtomic() && !isa<FenceInst>(I))
      AtomicInsts.push_back(I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the Size/Alignment is not supported, replace with a libcall.
    if (LI) {
      if (!atomicSizeSupported(TLI, LI)) {
        expandAtomicLoadToLibcall(LI);
        MadeChange = true;
        continue;
      }
    } else if (SI) {
      if (!atomicSizeSupported(TLI, SI)) {
        expandAtomicStoreToLibcall(SI);
        MadeChange = true;
        continue;
      }
    } else if (RMWI) {
      if (!atomicSizeSupported(TLI, RMWI)) {
        expandAtomicRMWToLibcall(RMWI);
        MadeChange = true;
        continue;
      }
    } else if (CASI) {
      if (!atomicSizeSupported(TLI, CASI)) {
        expandAtomicCASToLibcall(CASI);
        MadeChange = true;
        continue;
      }
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
      } else if (CASI &&
                 TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                     TargetLoweringBase::AtomicExpansionKind::None &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path.
        // As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
      }

      if (FenceOrdering != AtomicOrdering::Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering);
      }
    }

    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
        unsigned ValueSize = getAtomicOpSize(RMWI);
        AtomicRMWInst::BinOp Op = RMWI->getOperation();
        if (ValueSize < MinCASSize &&
            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
             Op == AtomicRMWInst::And)) {
          RMWI = widenPartwordAtomicRMW(RMWI);
          MadeChange = true;
        }

        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToIntegerType for floating point too.
      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
             "unimplemented - floating point not legal at IR level");
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        CASI = convertCmpXchgToIntegerType(CASI);
        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}

/// Get the iX type with the same bitwidth as T.
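/// For example, float maps to i32, double maps to i64, and a pointer maps to
/// the pointer-sized integer type of its address space.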
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewAddr);
  NewLI->setAlignment(LI->getAlignment());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}

bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all.
/// The backends learned to deal with the bitcast idiom because that was the
/// only way of expressing the notion of an atomic float or vector store. The
/// long term plan is to teach each backend to instruction select from the
/// original atomic store, but as a migration mechanism, we convert back to the
/// old format which the backends understand. Each backend will need individual
/// work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlignment());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them with an
  // atomic swap, which can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder, Value *&Success,
                                 Value *&NewLoaded) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP types.
  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    unsigned AS = Addr->getType()->getPointerAddressSpace();
    Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
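/// For example, atomicrmw max is emitted as an icmp sgt on the two values
/// followed by a select of the larger one; no memory access happens at this
/// level.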
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      llvm_unreachable(
          "MinCmpXchgSizeInBits not yet supported for LL/SC architectures.");
    } else {
      auto PerformOp = [&](IRBuilder<> &Builder, Value *Loaded) {
        return performAtomicOp(AI->getOperation(), Builder, Loaded,
                               AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}

namespace {

/// Result values from createMaskInstrs helper.
struct PartwordMaskValues {
  Type *WordType;
  Type *ValueType;
  Value *AlignedAddr;
  Value *ShiftAmt;
  Value *Mask;
  Value *Inv_Mask;
};

} // end anonymous namespace

/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of size
/// WordSize.
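///
/// For example, with ValueType = i8, WordSize = 4 and a little-endian layout,
/// an access at address A yields AlignedAddr = A & ~3,
/// ShiftAmt = (A & 3) * 8, and Mask = 0xFF << ShiftAmt.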
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignedAddr for it to have the same value as if
///           ValueType was loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignedAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
                                           Type *ValueType, Value *Addr,
                                           unsigned WordSize) {
  PartwordMaskValues Ret;

  BasicBlock *BB = I->getParent();
  Function *F = BB->getParent();
  Module *M = I->getModule();

  LLVMContext &Ctx = F->getContext();
  const DataLayout &DL = M->getDataLayout();

  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  assert(ValueSize < WordSize);

  Ret.ValueType = ValueType;
  Ret.WordType = Type::getIntNTy(Ctx, WordSize * 8);

  Type *WordPtrType =
      Ret.WordType->getPointerTo(Addr->getType()->getPointerAddressSpace());

  Value *AddrInt = Builder.CreatePtrToInt(Addr, DL.getIntPtrType(Ctx));
  Ret.AlignedAddr = Builder.CreateIntToPtr(
      Builder.CreateAnd(AddrInt, ~(uint64_t)(WordSize - 1)), WordPtrType,
      "AlignedAddr");

  Value *PtrLSB = Builder.CreateAnd(AddrInt, WordSize - 1, "PtrLSB");
  if (DL.isLittleEndian()) {
    // turn bytes into bits
    Ret.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    Ret.ShiftAmt =
        Builder.CreateShl(Builder.CreateXor(PtrLSB, WordSize - ValueSize), 3);
  }

  Ret.ShiftAmt = Builder.CreateTrunc(Ret.ShiftAmt, Ret.WordType, "ShiftAmt");
  Ret.Mask = Builder.CreateShl(
      ConstantInt::get(Ret.WordType, (uint64_t(1) << (ValueSize * 8)) - 1),
      Ret.ShiftAmt, "Mask");
  Ret.Inv_Mask = Builder.CreateNot(Ret.Mask, "Inv_Mask");

  return Ret;
}

/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation.)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilder<> &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  // TODO: update to use
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
  // to merge bits from two values without requiring PMV.Inv_Mask.
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::And:
    llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
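    // Compute the op on the shifted operands, then splice only the affected
    // bits back into the loaded word, leaving the rest of the word untouched.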
    Value *NewVal = performAtomicOp(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin: {
    // Finally, comparison ops will operate on the full value, so
    // truncate down to the original size, and expand out again after
    // doing the operation.
    Value *Loaded_Shiftdown = Builder.CreateTrunc(
        Builder.CreateLShr(Loaded, PMV.ShiftAmt), PMV.ValueType);
    Value *NewVal = performAtomicOp(Op, Builder, Loaded_Shiftdown, Inc);
    Value *NewVal_Shiftup = Builder.CreateShl(
        Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shiftup);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop must operate only upon a
/// part of the value.
void AtomicExpand::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg);

  AtomicOrdering MemOpOrder = AI->getOrdering();

  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  auto PerformPartwordOp = [&](IRBuilder<> &Builder, Value *Loaded) {
    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
                                 ValOperand_Shifted, AI->getValOperand(), PMV);
  };

  // TODO: When we're ready to support LLSC conversions too, use
  // insertRMWLLSCLoop here for ExpansionKind==LLSC.
  Value *OldResult =
      insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder,
                           PerformPartwordOp, createCmpXchgInstFun);
  Value *FinalOldResult = Builder.CreateTrunc(
      Builder.CreateLShr(OldResult, PMV.ShiftAmt), PMV.ValueType);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
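// For example, with a 32-bit minimum cmpxchg width, an i8 'atomicrmw or'
// becomes an i32 'atomicrmw or' on the containing aligned word, with the
// operand zero-extended and shifted into place; for 'and' the inverse mask
// is OR'd into the operand first so the bits outside the i8 are unaffected.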
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(Op, PMV.AlignedAddr,
                                                 NewOperand, AI->getOrdering());

  Value *FinalOldResult = Builder.CreateTrunc(
      Builder.CreateLShr(NewAI, PMV.ShiftAmt), PMV.ValueType);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}

void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we
  // need to add a retry-loop for strong cmpxchg, so that
  // modifications to other parts of the word don't cause a spurious
  // failure.

  // This generates code like the following:
  //     [[Setup mask values PMV.*]]
  //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
  //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
  //     %InitLoaded = load i32* %addr
  //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
  //     br label %partword.cmpxchg.loop
  // partword.cmpxchg.loop:
  //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
  //        [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
  //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
  //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
  //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
  //        i32 %FullWord_NewVal success_ordering failure_ordering
  //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
  //     %Success = extractvalue { i32, i1 } %NewCI, 1
  //     br i1 %Success, label %partword.cmpxchg.end,
  //        label %partword.cmpxchg.failure
  // partword.cmpxchg.failure:
  //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
  //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
  //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
  //        label %partword.cmpxchg.end
  // partword.cmpxchg.end:
  //     %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
  //     %FinalOldVal = trunc i32 %tmp1 to i8
  //     %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
  //     %Res = insertvalue { i8, i1 } %tmp2, i1 %Success, 1

  Value *Addr = CI->getPointerOperand();
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  IRBuilder<> Builder(CI);
  LLVMContext &Ctx = Builder.getContext();

  const int WordSize = TLI->getMinCmpXchgSizeInBits() / 8;

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully"
  // added a branch at the end of BB (to the wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), Addr, WordSize);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask into place the expected and new
  // values.
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  InitLoaded->setVolatile(CI->isVolatile());
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  // When we're building a strong cmpxchg, we need a loop, so you
  // might think we could use a weak cmpxchg inside. But, using strong
  // allows the below comparison for ShouldContinue, and we're
  // expecting the underlying cmpxchg to be a machine instruction,
  // which is strong anyway.
  NewCI->setWeak(CI->isWeak());

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);

  if (CI->isWeak())
    Builder.CreateBr(EndBB);
  else
    Builder.CreateCondBr(Success, EndBB, FailureBB);

  // partword.cmpxchg.failure:
  Builder.SetInsertPoint(FailureBB);
  // Upon failure, verify that the masked-out part of the loaded value
  // has been modified. If it hasn't, abort the cmpxchg, since the
  // masked-in part must have been.
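  // (In that case the compare operand itself did not match memory, so the
  // cmpxchg has genuinely failed and retrying cannot succeed.)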
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
  Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);

  // Add the second value to the phi from above.
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // partword.cmpxchg.end:
  Builder.SetInsertPoint(CI);

  Value *FinalOldVal = Builder.CreateTrunc(
      Builder.CreateLShr(OldVal, PMV.ShiftAmt), PMV.ValueType);
  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}

void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  IRBuilder<> Builder(I);
  Value *Loaded =
      insertRMWLLSCLoop(Builder, ResultType, Addr, MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}

void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used. Otherwise, just
  // zero-extend.
  Instruction::CastOps CastOp = Instruction::ZExt;
  AtomicRMWInst::BinOp RMWOp = AI->getOperation();
  if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = Builder.CreateTrunc(
      Builder.CreateLShr(OldResult, PMV.ShiftAmt), PMV.ValueType);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
  IRBuilder<> Builder(CI);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getSuccessOrdering());
  Value *FinalOldVal = Builder.CreateTrunc(
      Builder.CreateLShr(OldVal, PMV.ShiftAmt), PMV.ValueType);

  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}

Value *AtomicExpand::insertRMWLLSCLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}

/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
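/// For example, a cmpxchg of i8* operands becomes a cmpxchg of pointer-sized
/// integers, with ptrtoint casts on the operands and an inttoptr cast on the
/// loaded result.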
AtomicCmpXchgInst *
AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(CI);

  Value *Addr = CI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
                                            CI->getSuccessOrdering(),
                                            CI->getFailureOrdering(),
                                            CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, then the target does not
  // want to deal with memory orders, and emitLeading/TrailingFence should take
  // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder =
      ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder;

  // In implementations which use a barrier to achieve release semantics, we
  // can delay emitting this barrier until we know a store is actually going
  // to be attempted. The cost of this delay is that we need 2 copies of the
  // block emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->optForMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->optForMinSize() && !CI->isWeak();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  // cmpxchg.start:
  //     %unreleasedload = @load.linked(%addr)
  //     %should_store = icmp eq %unreleasedload, %desired
  //     br i1 %should_store, label %cmpxchg.fencedstore,
  //                          label %cmpxchg.nostore
  // cmpxchg.fencedstore:
  //     fence?
  //     br label %cmpxchg.trystore
  // cmpxchg.trystore:
  //     %loaded.trystore = phi [%unreleasedload, %cmpxchg.fencedstore],
  //                            [%releasedload, %cmpxchg.releasedload]
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.releasedload/%cmpxchg.failure
  // cmpxchg.releasedload:
  //     %releasedload = @load.linked(%addr)
  //     %should_store = icmp eq %releasedload, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
  //                           [%releasedload,
  //                            %cmpxchg.releasedload/%cmpxchg.trystore]
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %loaded = phi [%loaded.nostore, %cmpxchg.failure],
  //                   [%loaded.trystore, %cmpxchg.trystore]
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoad, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
    ShouldStore = Builder.CreateICmpEQ(SecondLoad, CI->getCompareOperand(),
                                       "should_store");

    // If the cmpxchg doesn't actually need any ordering when it fails, we can
    // jump straight past that fence instruction (if it exists).
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Setup the builder so we can create any PHIs we need.
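  // If the release barrier was sunk (HasReleasedLoadBB), the loaded value may
  // come from either load-linked, so PHIs in TryStoreBB, NoStoreBB and ExitBB
  // are needed to merge the two.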
  Value *Loaded;
  if (!HasReleasedLoadBB)
    Loaded = UnreleasedLoad;
  else {
    Builder.SetInsertPoint(TryStoreBB, TryStoreBB->begin());
    PHINode *TryStoreLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
    TryStoreLoaded->addIncoming(UnreleasedLoad, ReleasingStoreBB);
    TryStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);

    Builder.SetInsertPoint(NoStoreBB, NoStoreBB->begin());
    PHINode *NoStoreLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
    NoStoreLoaded->addIncoming(UnreleasedLoad, StartBB);
    NoStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);

    Builder.SetInsertPoint(ExitBB, ++ExitBB->begin());
    PHINode *ExitLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
    ExitLoaded->addIncoming(TryStoreLoaded, SuccessBB);
    ExitLoaded->addIncoming(NoStoreLoaded, FailureBB);

    Loaded = ExitLoaded;
  }

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now that we're no longer iterating through
  // them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}

bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

Value *AtomicExpand::insertRMWCmpXchgLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(ResultTy, Addr);
  // Atomics require at least natural alignment.
  InitLoaded->setAlignment(ResultTy->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return NewLoaded;
}

bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    assert(ValueSize >= MinCASSize &&
           "MinCmpXchgSizeInBits not yet supported for LL/SC expansions.");
    return expandAtomicCmpXchg(CI);
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  }
}

// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  IRBuilder<> Builder(AI);
  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getOrdering(),
      [&](IRBuilder<> &Builder, Value *Loaded) {
        return performAtomicOp(AI->getOperation(), Builder, Loaded,
                               AI->getValOperand());
      },
      CreateCmpXchg);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}

// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist
// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist in C on the target (otherwise, the
// function wouldn't actually be defined).
static bool canUseSizedAtomicCall(unsigned Size, unsigned Align,
                                  const DataLayout &DL) {
  // TODO: "LargestSize" is an approximation for "largest type that
  // you can express in C". It seems to be the case that int128 is
  // supported on all 64-bit platforms, otherwise only up to 64-bit
  // integers are supported. If we get this wrong, then we'll try to
  // call a sized libcall that doesn't actually exist. There should
  // really be some more reliable way in LLVM of determining integer
  // sizes which are valid in the target's C ABI...
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Align >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}

void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, Align, I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  (void)expanded;
  assert(expanded && "expandAtomicOpToLibcall shouldn't fail for Load");
}

void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, Align, I->getPointerOperand(), I->getValueOperand(), nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  (void)expanded;
  assert(expanded && "expandAtomicOpToLibcall shouldn't fail for Store");
}

void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, Align, I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  (void)expanded;
  assert(expanded && "expandAtomicOpToLibcall shouldn't fail for CAS");
}
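
// For example (illustrative): under the check above, a naturally aligned
// 4-byte cmpxchg becomes a call to __atomic_compare_exchange_4, while a
// 4-byte cmpxchg known only to be 2-byte aligned fails the 'Align >= Size'
// test and must use the generic __atomic_compare_exchange, which takes the
// operation size as its first argument.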
static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::BAD_BINOP:
    llvm_unreachable("Should not have BAD_BINOP.");
  case AtomicRMWInst::Xchg:
    return makeArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return makeArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return makeArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return makeArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return makeArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return makeArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return makeArrayRef(LibcallsNand);
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    // No atomic libcalls are available for max/min/umax/umin.
    return {};
  }
  llvm_unreachable("Unexpected AtomicRMW operation.");
}

void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());

  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, Align, I->getPointerOperand(), I->getValOperand(), nullptr,
        I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for
  // the operation (min/max), or there were only size-specialized
  // libcalls (add/sub/etc) and we needed a generic. So, expand to a
  // CAS libcall, via a CAS loop, instead.
  if (!Success) {
    expandAtomicRMWToCmpXchg(I, [this](IRBuilder<> &Builder, Value *Addr,
                                       Value *Loaded, Value *NewVal,
                                       AtomicOrdering MemOpOrder,
                                       Value *&Success, Value *&NewLoaded) {
      // Create the CAS instruction normally...
      AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
          Addr, Loaded, NewVal, MemOpOrder,
          AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
      Success = Builder.CreateExtractValue(Pair, 1, "success");
      NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

      // ...and then expand the CAS into a libcall.
      expandAtomicCASToLibcall(Pair);
    });
  }
}
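
// For example (illustrative): 'atomicrmw max' has no __atomic_fetch_max_N
// libcall, so GetRMWLibcall returns an empty list and the operation is
// rewritten as a compare-exchange loop whose cmpxchg is itself expanded to
// the __atomic_compare_exchange libcall by the lambda above.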
// A helper routine for the above expandAtomic*ToLibcall functions.
//
// 'Libcalls' contains an array of enum values for the particular
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller. Depending on the particular call, some will be null.
bool AtomicExpand::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, unsigned Align, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Align, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  unsigned AllocaAlignment = DL.getPrefTypeAlignment(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32. So
  // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
  ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);

  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1: RTLibType = Libcalls[1]; break;
    case 2: RTLibType = Libcalls[2]; break;
    case 4: RTLibType = Libcalls[3]; break;
    case 8: RTLibType = Libcalls[4]; break;
    case 16: RTLibType = Libcalls[5]; break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this
    // operation, so give up.
    return false;
  }

  // Build up the function call. There are two kinds. First, the sized
  // variants. These calls are going to be one of the following (with
  // N=1,2,4,8,16):
  //   iN   __atomic_load_N(iN *ptr, int ordering)
  //   void __atomic_store_N(iN *ptr, iN val, int ordering)
  //   iN   __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
  //   bool __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
  //                                    int success_order, int failure_order)
  //
  // Note that these functions can be used for non-integer atomic
  // operations, the values just need to be bitcast to integers on the
  // way in and out.
  //
  // And, then, the generic variants. They look like the following:
  //   void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
  //   void __atomic_store(size_t size, void *ptr, void *val, int ordering)
  //   void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
  //                          int ordering)
  //   bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
  //                                  void *desired, int success_order,
  //                                  int failure_order)
  //
  // The different signatures are built up depending on the
  // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
  // variables.
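
  // For example (illustrative, assuming a 32-bit target where no sized
  // 16-byte libcall exists): an i128 'atomicrmw xchg' would be emitted as
  //   __atomic_exchange(16, %ptr.i8, %val.alloca.i8, %ret.alloca.i8, order)
  // with 'val' spilled to a stack slot and the result read back from the
  // 'ret' slot after the call, as built below.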

  AllocaInst *AllocaCASExpected = nullptr;
  Value *AllocaCASExpected_i8 = nullptr;
  AllocaInst *AllocaValue = nullptr;
  Value *AllocaValue_i8 = nullptr;
  AllocaInst *AllocaResult = nullptr;
  Value *AllocaResult_i8 = nullptr;

  Type *ResultTy;
  SmallVector<Value *, 6> Args;
  AttributeList Attr;

  // 'size' argument.
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equivalent to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  Value *PtrVal =
      Builder.CreateBitCast(PointerOperand, Type::getInt8PtrTy(Ctx));
  Args.push_back(PtrVal);

  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    AllocaCASExpected_i8 =
        Builder.CreateBitCast(AllocaCASExpected, Type::getInt8PtrTy(Ctx));
    Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
    Args.push_back(AllocaCASExpected_i8);
  }

  // 'val' argument ('desired' for cas), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      AllocaValue_i8 =
          Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
      Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue_i8);
    }
  }

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    AllocaResult_i8 =
        Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx));
    Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
    Args.push_back(AllocaResult_i8);
  }

  // 'ordering' ('success_order' for cas) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // Now, the return type.
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addAttribute(Ctx, AttributeList::ReturnIndex, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  Constant *LibcallFn =
      M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;
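
  // At this point, for a naturally aligned seq_cst 4-byte atomic load, the
  // emitted call would look roughly like (illustrative; 5 is the C ABI
  // encoding of memory_order_seq_cst produced by toCABI):
  //   %result = call i32 @__atomic_load_4(i8* %ptr.cast, i32 5)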
  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool result
    // from call}
    Type *FinalResultTy = I->getType();
    Value *V = UndefValue::get(FinalResultTy);
    Value *ExpectedOut =
        Builder.CreateAlignedLoad(AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      V = Builder.CreateAlignedLoad(AllocaResult, AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}
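
// End-to-end illustration of the CAS path above (not emitted verbatim):
//   %pair = cmpxchg i32* %p, i32 %exp, i32 %des seq_cst seq_cst
// lowers to roughly
//   store i32 %exp, i32* %expected.alloca
//   %ok = call zeroext i1 @__atomic_compare_exchange_4(
//             i8* %p.i8, i8* %expected.i8, i32 %des, i32 5, i32 5)
//   %out = load i32, i32* %expected.alloca
// with the original { i32, i1 } result rebuilt from %out and %ok.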