1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the code for emitting atomic operations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCall.h"
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/FrontendDiagnostic.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
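  // Gathers the layout of an atomic l-value (value/atomic types, sizes,
  // alignments, evaluation kind) and provides the shared helpers used by the
  // atomic emission routines below.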
30   class AtomicInfo {
31     CodeGenFunction &CGF;
32     QualType AtomicTy;
33     QualType ValueTy;
34     uint64_t AtomicSizeInBits;
35     uint64_t ValueSizeInBits;
36     CharUnits AtomicAlign;
37     CharUnits ValueAlign;
38     TypeEvaluationKind EvaluationKind;
39     bool UseLibcall;
40     LValue LVal;
41     CGBitFieldInfo BFI;
42   public:
43     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45           EvaluationKind(TEK_Scalar), UseLibcall(true) {
46       assert(!lvalue.isGlobalReg());
47       ASTContext &C = CGF.getContext();
48       if (lvalue.isSimple()) {
49         AtomicTy = lvalue.getType();
50         if (auto *ATy = AtomicTy->getAs<AtomicType>())
51           ValueTy = ATy->getValueType();
52         else
53           ValueTy = AtomicTy;
54         EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56         uint64_t ValueAlignInBits;
57         uint64_t AtomicAlignInBits;
58         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59         ValueSizeInBits = ValueTI.Width;
60         ValueAlignInBits = ValueTI.Align;
61 
62         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63         AtomicSizeInBits = AtomicTI.Width;
64         AtomicAlignInBits = AtomicTI.Align;
65 
66         assert(ValueSizeInBits <= AtomicSizeInBits);
67         assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71         if (lvalue.getAlignment().isZero())
72           lvalue.setAlignment(AtomicAlign);
73 
74         LVal = lvalue;
75       } else if (lvalue.isBitField()) {
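        // For bit-fields, widen the access so the atomic operation covers a
        // whole, suitably aligned storage unit containing the bit-field.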
76         ValueTy = lvalue.getType();
77         ValueSizeInBits = C.getTypeSize(ValueTy);
78         auto &OrigBFI = lvalue.getBitFieldInfo();
79         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80         AtomicSizeInBits = C.toBits(
81             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82                 .alignTo(lvalue.getAlignment()));
83         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84         auto OffsetInChars =
85             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86             lvalue.getAlignment();
87         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88             VoidPtrAddr, OffsetInChars.getQuantity());
89         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90             VoidPtrAddr,
91             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92             "atomic_bitfield_base");
93         BFI = OrigBFI;
94         BFI.Offset = Offset;
95         BFI.StorageSize = AtomicSizeInBits;
96         BFI.StorageOffset += OffsetInChars;
97         LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98                                     BFI, lvalue.getType(), lvalue.getBaseInfo(),
99                                     lvalue.getTBAAInfo());
100         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101         if (AtomicTy.isNull()) {
102           llvm::APInt Size(
103               /*numBits=*/32,
104               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105           AtomicTy =
106               C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
107                                      /*IndexTypeQuals=*/0);
108         }
109         AtomicAlign = ValueAlign = lvalue.getAlignment();
110       } else if (lvalue.isVectorElt()) {
111         ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112         ValueSizeInBits = C.getTypeSize(ValueTy);
113         AtomicTy = lvalue.getType();
114         AtomicSizeInBits = C.getTypeSize(AtomicTy);
115         AtomicAlign = ValueAlign = lvalue.getAlignment();
116         LVal = lvalue;
117       } else {
118         assert(lvalue.isExtVectorElt());
119         ValueTy = lvalue.getType();
120         ValueSizeInBits = C.getTypeSize(ValueTy);
121         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122             lvalue.getType(), cast<llvm::FixedVectorType>(
123                                   lvalue.getExtVectorAddress().getElementType())
124                                   ->getNumElements());
125         AtomicSizeInBits = C.getTypeSize(AtomicTy);
126         AtomicAlign = ValueAlign = lvalue.getAlignment();
127         LVal = lvalue;
128       }
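      // Ask the target whether an atomic of this size and alignment can be
      // inlined; otherwise fall back to the atomic runtime library.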
129       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131     }
132 
133     QualType getAtomicType() const { return AtomicTy; }
134     QualType getValueType() const { return ValueTy; }
135     CharUnits getAtomicAlignment() const { return AtomicAlign; }
136     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139     bool shouldUseLibcall() const { return UseLibcall; }
140     const LValue &getAtomicLValue() const { return LVal; }
141     llvm::Value *getAtomicPointer() const {
142       if (LVal.isSimple())
143         return LVal.getPointer(CGF);
144       else if (LVal.isBitField())
145         return LVal.getBitFieldPointer();
146       else if (LVal.isVectorElt())
147         return LVal.getVectorPointer();
148       assert(LVal.isExtVectorElt());
149       return LVal.getExtVectorPointer();
150     }
151     Address getAtomicAddress() const {
152       return Address(getAtomicPointer(), getAtomicAlignment());
153     }
154 
155     Address getAtomicAddressAsAtomicIntPointer() const {
156       return emitCastToAtomicIntPointer(getAtomicAddress());
157     }
158 
159     /// Is the atomic size larger than the underlying value type?
160     ///
161     /// Note that the absence of padding does not mean that atomic
162     /// objects are completely interchangeable with non-atomic
163     /// objects: we might have promoted the alignment of a type
164     /// without making it bigger.
165     bool hasPadding() const {
166       return (ValueSizeInBits != AtomicSizeInBits);
167     }
168 
169     bool emitMemSetZeroIfNecessary() const;
170 
171     llvm::Value *getAtomicSizeValue() const {
172       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
173       return CGF.CGM.getSize(size);
174     }
175 
176     /// Cast the given pointer to an integer pointer suitable for atomic
177     /// operations.
178     Address emitCastToAtomicIntPointer(Address Addr) const;
179 
180     /// If Addr is compatible with the iN that will be used for an atomic
181     /// operation, bitcast it. Otherwise, create a temporary that is suitable
182     /// and copy the value across.
183     Address convertToAtomicIntPointer(Address Addr) const;
184 
185     /// Turn an atomic-layout object into an r-value.
186     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
187                                      SourceLocation loc, bool AsValue) const;
188 
189     /// Converts an rvalue to an integer value.
190     llvm::Value *convertRValueToInt(RValue RVal) const;
191 
192     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
193                                      AggValueSlot ResultSlot,
194                                      SourceLocation Loc, bool AsValue) const;
195 
196     /// Copy an atomic r-value into atomic-layout memory.
197     void emitCopyIntoMemory(RValue rvalue) const;
198 
199     /// Project an l-value down to the value field.
200     LValue projectValue() const {
201       assert(LVal.isSimple());
202       Address addr = getAtomicAddress();
203       if (hasPadding())
204         addr = CGF.Builder.CreateStructGEP(addr, 0);
205 
206       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
207                               LVal.getBaseInfo(), LVal.getTBAAInfo());
208     }
209 
210     /// Emits atomic load.
211     /// \returns Loaded value.
212     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
213                           bool AsValue, llvm::AtomicOrdering AO,
214                           bool IsVolatile);
215 
216     /// Emits atomic compare-and-exchange sequence.
217     /// \param Expected Expected value.
218     /// \param Desired Desired value.
219     /// \param Success Atomic ordering for success operation.
220     /// \param Failure Atomic ordering for failed operation.
221     /// \param IsWeak true if atomic operation is weak, false otherwise.
222     /// \returns Pair of values: previous value from storage (value type) and
223     /// boolean flag (i1 type) with true if success and false otherwise.
224     std::pair<RValue, llvm::Value *>
225     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
226                               llvm::AtomicOrdering Success =
227                                   llvm::AtomicOrdering::SequentiallyConsistent,
228                               llvm::AtomicOrdering Failure =
229                                   llvm::AtomicOrdering::SequentiallyConsistent,
230                               bool IsWeak = false);
231 
232     /// Emits atomic update.
233     /// \param AO Atomic ordering.
234     /// \param UpdateOp Update operation for the current lvalue.
235     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
236                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
237                           bool IsVolatile);
238     /// Emits atomic update.
239     /// \param AO Atomic ordering.
240     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
241                           bool IsVolatile);
242 
243     /// Materialize an atomic r-value in atomic-layout memory.
244     Address materializeRValue(RValue rvalue) const;
245 
246     /// Creates temp alloca for intermediate operations on atomic value.
247     Address CreateTempAlloca() const;
248   private:
249     bool requiresMemSetZero(llvm::Type *type) const;
250 
251 
252     /// Emits atomic load as a libcall.
253     void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
254                                llvm::AtomicOrdering AO, bool IsVolatile);
255     /// Emits atomic load as LLVM instruction.
256     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
257     /// Emits atomic compare-and-exchange op as a libcall.
258     llvm::Value *EmitAtomicCompareExchangeLibcall(
259         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
260         llvm::AtomicOrdering Success =
261             llvm::AtomicOrdering::SequentiallyConsistent,
262         llvm::AtomicOrdering Failure =
263             llvm::AtomicOrdering::SequentiallyConsistent);
264     /// Emits atomic compare-and-exchange op as LLVM instruction.
265     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
266         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
267         llvm::AtomicOrdering Success =
268             llvm::AtomicOrdering::SequentiallyConsistent,
269         llvm::AtomicOrdering Failure =
270             llvm::AtomicOrdering::SequentiallyConsistent,
271         bool IsWeak = false);
272     /// Emit atomic update as libcalls.
273     void
274     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
275                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
276                             bool IsVolatile);
277     /// Emit atomic update as LLVM instructions.
278     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
279                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
280                             bool IsVolatile);
281     /// Emit atomic update as libcalls.
282     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
283                                  bool IsVolatile);
284     /// Emit atomic update as LLVM instructions.
285     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
286                             bool IsVolatile);
287   };
288 }
289 
290 Address AtomicInfo::CreateTempAlloca() const {
291   Address TempAlloca = CGF.CreateMemTemp(
292       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
293                                                                 : AtomicTy,
294       getAtomicAlignment(),
295       "atomic-temp");
296   // Cast to pointer to value type for bitfields.
297   if (LVal.isBitField())
298     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
299         TempAlloca, getAtomicAddress().getType());
300   return TempAlloca;
301 }
302 
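// Emit a call to a routine from the atomic support library
// (e.g. __atomic_load), marking it as nounwind and willreturn.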
303 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
304                                 StringRef fnName,
305                                 QualType resultType,
306                                 CallArgList &args) {
307   const CGFunctionInfo &fnInfo =
308     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
309   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
310   llvm::AttrBuilder fnAttrB;
311   fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
312   fnAttrB.addAttribute(llvm::Attribute::WillReturn);
313   llvm::AttributeList fnAttrs = llvm::AttributeList::get(
314       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
315 
316   llvm::FunctionCallee fn =
317       CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
318   auto callee = CGCallee::forDirect(fn);
319   return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
320 }
321 
322 /// Does a store of the given IR type modify the full expected width?
323 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
324                            uint64_t expectedSize) {
325   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
326 }
327 
328 /// Does the atomic type require memsetting to zero before initialization?
329 ///
330 /// The IR type is provided as a way of making certain queries faster.
331 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
332   // If the atomic type has size padding, we definitely need a memset.
333   if (hasPadding()) return true;
334 
335   // Otherwise, do some simple heuristics to try to avoid it:
336   switch (getEvaluationKind()) {
337   // For scalars and complexes, check whether the store size of the
338   // type uses the full size.
339   case TEK_Scalar:
340     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
341   case TEK_Complex:
342     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
343                            AtomicSizeInBits / 2);
344 
345   // Padding in structs has an undefined bit pattern.  User beware.
346   case TEK_Aggregate:
347     return false;
348   }
349   llvm_unreachable("bad evaluation kind");
350 }
351 
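// Zero the whole atomic object when the subsequent initialization would not
// otherwise write every byte (padding, or a representation narrower than the
// atomic width), so that no bits are left undefined.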
352 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
353   assert(LVal.isSimple());
354   llvm::Value *addr = LVal.getPointer(CGF);
355   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
356     return false;
357 
358   CGF.Builder.CreateMemSet(
359       addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
360       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
361       LVal.getAlignment().getAsAlign());
362   return true;
363 }
364 
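// Emit a single cmpxchg for the given success/failure orderings. On failure
// the old value is stored back into the 'expected' slot (Val1); the i1
// success flag is stored to Dest.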
365 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
366                               Address Dest, Address Ptr,
367                               Address Val1, Address Val2,
368                               uint64_t Size,
369                               llvm::AtomicOrdering SuccessOrder,
370                               llvm::AtomicOrdering FailureOrder,
371                               llvm::SyncScope::ID Scope) {
372   // Load the expected and desired values; IsWeak is set on the cmpxchg below.
373   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
374   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
375 
376   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
377       Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
378       Scope);
379   Pair->setVolatile(E->isVolatile());
380   Pair->setWeak(IsWeak);
381 
382   // Cmp holds the result of the compare-exchange operation: true on success,
383   // false on failure.
384   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
385   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
386 
387   // This basic block is used to hold the store instruction if the operation
388   // failed.
389   llvm::BasicBlock *StoreExpectedBB =
390       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
391 
392   // This basic block is the exit point of the operation; we should end up
393   // here regardless of whether or not the operation succeeded.
394   llvm::BasicBlock *ContinueBB =
395       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
396 
397   // Update Expected if Expected isn't equal to Old, otherwise branch to the
398   // exit point.
399   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
400 
401   CGF.Builder.SetInsertPoint(StoreExpectedBB);
402   // Update the memory at Expected with Old's value.
403   CGF.Builder.CreateStore(Old, Val1);
404   // Finally, branch to the exit point.
405   CGF.Builder.CreateBr(ContinueBB);
406 
407   CGF.Builder.SetInsertPoint(ContinueBB);
408   // Update the memory at Dest with Cmp's value.
409   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
410 }
411 
412 /// Given an ordering required on success, emit all possible cmpxchg
413 /// instructions to cope with the provided (but possibly only dynamically known)
414 /// FailureOrder.
415 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
416                                         bool IsWeak, Address Dest, Address Ptr,
417                                         Address Val1, Address Val2,
418                                         llvm::Value *FailureOrderVal,
419                                         uint64_t Size,
420                                         llvm::AtomicOrdering SuccessOrder,
421                                         llvm::SyncScope::ID Scope) {
422   llvm::AtomicOrdering FailureOrder;
423   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
424     auto FOS = FO->getSExtValue();
425     if (!llvm::isValidAtomicOrderingCABI(FOS))
426       FailureOrder = llvm::AtomicOrdering::Monotonic;
427     else
428       switch ((llvm::AtomicOrderingCABI)FOS) {
429       case llvm::AtomicOrderingCABI::relaxed:
430       case llvm::AtomicOrderingCABI::release:
431       case llvm::AtomicOrderingCABI::acq_rel:
432         FailureOrder = llvm::AtomicOrdering::Monotonic;
433         break;
434       case llvm::AtomicOrderingCABI::consume:
435       case llvm::AtomicOrderingCABI::acquire:
436         FailureOrder = llvm::AtomicOrdering::Acquire;
437         break;
438       case llvm::AtomicOrderingCABI::seq_cst:
439         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
440         break;
441       }
442     if (isStrongerThan(FailureOrder, SuccessOrder)) {
443       // Don't assert on undefined behavior "failure argument shall be no
444       // stronger than the success argument".
445       FailureOrder =
446           llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
447     }
448     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
449                       FailureOrder, Scope);
450     return;
451   }
452 
453   // Create all the relevant BB's
454   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
455                    *SeqCstBB = nullptr;
456   MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
457   if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
458       SuccessOrder != llvm::AtomicOrdering::Release)
459     AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
460   if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
461     SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
462 
463   llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
464 
465   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
466 
467   // Emit all the different atomics
468 
469   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
470   // doesn't matter unless someone is crazy enough to use something that
471   // doesn't fold to a constant for the ordering.
472   CGF.Builder.SetInsertPoint(MonotonicBB);
473   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
475   CGF.Builder.CreateBr(ContBB);
476 
477   if (AcquireBB) {
478     CGF.Builder.SetInsertPoint(AcquireBB);
479     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
480                       Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
481     CGF.Builder.CreateBr(ContBB);
482     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
483                 AcquireBB);
484     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
485                 AcquireBB);
486   }
487   if (SeqCstBB) {
488     CGF.Builder.SetInsertPoint(SeqCstBB);
489     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
490                       llvm::AtomicOrdering::SequentiallyConsistent, Scope);
491     CGF.Builder.CreateBr(ContBB);
492     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
493                 SeqCstBB);
494   }
495 
496   CGF.Builder.SetInsertPoint(ContBB);
497 }
498 
499 /// Duplicate the atomic min/max operation in conventional IR for the builtin
500 /// variants that return the new rather than the original value.
501 static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
502                                          AtomicExpr::AtomicOp Op,
503                                          bool IsSigned,
504                                          llvm::Value *OldVal,
505                                          llvm::Value *RHS) {
506   llvm::CmpInst::Predicate Pred;
507   switch (Op) {
508   default:
509     llvm_unreachable("Unexpected min/max operation");
510   case AtomicExpr::AO__atomic_max_fetch:
511     Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
512     break;
513   case AtomicExpr::AO__atomic_min_fetch:
514     Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
515     break;
516   }
517   llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
518   return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
519 }
520 
521 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
522                          Address Ptr, Address Val1, Address Val2,
523                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
524                          uint64_t Size, llvm::AtomicOrdering Order,
525                          llvm::SyncScope::ID Scope) {
526   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
527   bool PostOpMinMax = false;
528   unsigned PostOp = 0;
529 
530   switch (E->getOp()) {
531   case AtomicExpr::AO__c11_atomic_init:
532   case AtomicExpr::AO__opencl_atomic_init:
533     llvm_unreachable("Already handled!");
534 
535   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
536   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
537     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
538                                 FailureOrder, Size, Order, Scope);
539     return;
540   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
541   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
542     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
543                                 FailureOrder, Size, Order, Scope);
544     return;
545   case AtomicExpr::AO__atomic_compare_exchange:
546   case AtomicExpr::AO__atomic_compare_exchange_n: {
547     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
548       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
549                                   Val1, Val2, FailureOrder, Size, Order, Scope);
550     } else {
551       // Create all the relevant BB's
552       llvm::BasicBlock *StrongBB =
553           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
554       llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
555       llvm::BasicBlock *ContBB =
556           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
557 
558       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
559       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
560 
561       CGF.Builder.SetInsertPoint(StrongBB);
562       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
563                                   FailureOrder, Size, Order, Scope);
564       CGF.Builder.CreateBr(ContBB);
565 
566       CGF.Builder.SetInsertPoint(WeakBB);
567       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
568                                   FailureOrder, Size, Order, Scope);
569       CGF.Builder.CreateBr(ContBB);
570 
571       CGF.Builder.SetInsertPoint(ContBB);
572     }
573     return;
574   }
575   case AtomicExpr::AO__c11_atomic_load:
576   case AtomicExpr::AO__opencl_atomic_load:
577   case AtomicExpr::AO__atomic_load_n:
578   case AtomicExpr::AO__atomic_load: {
579     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
580     Load->setAtomic(Order, Scope);
581     Load->setVolatile(E->isVolatile());
582     CGF.Builder.CreateStore(Load, Dest);
583     return;
584   }
585 
586   case AtomicExpr::AO__c11_atomic_store:
587   case AtomicExpr::AO__opencl_atomic_store:
588   case AtomicExpr::AO__atomic_store:
589   case AtomicExpr::AO__atomic_store_n: {
590     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
591     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
592     Store->setAtomic(Order, Scope);
593     Store->setVolatile(E->isVolatile());
594     return;
595   }
596 
597   case AtomicExpr::AO__c11_atomic_exchange:
598   case AtomicExpr::AO__opencl_atomic_exchange:
599   case AtomicExpr::AO__atomic_exchange_n:
600   case AtomicExpr::AO__atomic_exchange:
601     Op = llvm::AtomicRMWInst::Xchg;
602     break;
603 
604   case AtomicExpr::AO__atomic_add_fetch:
605     PostOp = llvm::Instruction::Add;
606     LLVM_FALLTHROUGH;
607   case AtomicExpr::AO__c11_atomic_fetch_add:
608   case AtomicExpr::AO__opencl_atomic_fetch_add:
609   case AtomicExpr::AO__atomic_fetch_add:
610     Op = llvm::AtomicRMWInst::Add;
611     break;
612 
613   case AtomicExpr::AO__atomic_sub_fetch:
614     PostOp = llvm::Instruction::Sub;
615     LLVM_FALLTHROUGH;
616   case AtomicExpr::AO__c11_atomic_fetch_sub:
617   case AtomicExpr::AO__opencl_atomic_fetch_sub:
618   case AtomicExpr::AO__atomic_fetch_sub:
619     Op = llvm::AtomicRMWInst::Sub;
620     break;
621 
622   case AtomicExpr::AO__atomic_min_fetch:
623     PostOpMinMax = true;
624     LLVM_FALLTHROUGH;
625   case AtomicExpr::AO__c11_atomic_fetch_min:
626   case AtomicExpr::AO__opencl_atomic_fetch_min:
627   case AtomicExpr::AO__atomic_fetch_min:
628     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
629                                                   : llvm::AtomicRMWInst::UMin;
630     break;
631 
632   case AtomicExpr::AO__atomic_max_fetch:
633     PostOpMinMax = true;
634     LLVM_FALLTHROUGH;
635   case AtomicExpr::AO__c11_atomic_fetch_max:
636   case AtomicExpr::AO__opencl_atomic_fetch_max:
637   case AtomicExpr::AO__atomic_fetch_max:
638     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
639                                                   : llvm::AtomicRMWInst::UMax;
640     break;
641 
642   case AtomicExpr::AO__atomic_and_fetch:
643     PostOp = llvm::Instruction::And;
644     LLVM_FALLTHROUGH;
645   case AtomicExpr::AO__c11_atomic_fetch_and:
646   case AtomicExpr::AO__opencl_atomic_fetch_and:
647   case AtomicExpr::AO__atomic_fetch_and:
648     Op = llvm::AtomicRMWInst::And;
649     break;
650 
651   case AtomicExpr::AO__atomic_or_fetch:
652     PostOp = llvm::Instruction::Or;
653     LLVM_FALLTHROUGH;
654   case AtomicExpr::AO__c11_atomic_fetch_or:
655   case AtomicExpr::AO__opencl_atomic_fetch_or:
656   case AtomicExpr::AO__atomic_fetch_or:
657     Op = llvm::AtomicRMWInst::Or;
658     break;
659 
660   case AtomicExpr::AO__atomic_xor_fetch:
661     PostOp = llvm::Instruction::Xor;
662     LLVM_FALLTHROUGH;
663   case AtomicExpr::AO__c11_atomic_fetch_xor:
664   case AtomicExpr::AO__opencl_atomic_fetch_xor:
665   case AtomicExpr::AO__atomic_fetch_xor:
666     Op = llvm::AtomicRMWInst::Xor;
667     break;
668 
669   case AtomicExpr::AO__atomic_nand_fetch:
670     PostOp = llvm::Instruction::And; // the NOT is special cased below
671     LLVM_FALLTHROUGH;
672   case AtomicExpr::AO__atomic_fetch_nand:
673     Op = llvm::AtomicRMWInst::Nand;
674     break;
675   }
676 
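  // Everything else is a read-modify-write; emit a single atomicrmw.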
677   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
678   llvm::AtomicRMWInst *RMWI =
679       CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
680   RMWI->setVolatile(E->isVolatile());
681 
682   // For __atomic_*_fetch operations, perform the operation again to
683   // determine the value which was written.
684   llvm::Value *Result = RMWI;
685   if (PostOpMinMax)
686     Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
687                                   E->getValueType()->isSignedIntegerType(),
688                                   RMWI, LoadVal1);
689   else if (PostOp)
690     Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
691                                      LoadVal1);
692   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
693     Result = CGF.Builder.CreateNot(Result);
694   CGF.Builder.CreateStore(Result, Dest);
695 }
696 
697 // This function emits any expression (scalar, complex, or aggregate)
698 // into a temporary alloca.
699 static Address
700 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
701   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
702   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
703                        /*Init*/ true);
704   return DeclPtr;
705 }
706 
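// Resolve the synchronization scope and dispatch to the emitter above: a
// constant scope maps directly to an LLVM sync scope ID, while a runtime
// scope is handled by switching over the possible values.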
707 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
708                          Address Ptr, Address Val1, Address Val2,
709                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
710                          uint64_t Size, llvm::AtomicOrdering Order,
711                          llvm::Value *Scope) {
712   auto ScopeModel = Expr->getScopeModel();
713 
714   // LLVM atomic instructions always have a sync scope. If the clang atomic
715   // expression has no scope operand, use the default LLVM sync scope.
716   if (!ScopeModel) {
717     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
718                  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
719     return;
720   }
721 
722   // Handle constant scope.
723   if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
724     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
725         CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
726         Order, CGF.CGM.getLLVMContext());
727     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
728                  Order, SCID);
729     return;
730   }
731 
732   // Handle non-constant scope.
733   auto &Builder = CGF.Builder;
734   auto Scopes = ScopeModel->getRuntimeValues();
735   llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
736   for (auto S : Scopes)
737     BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
738 
739   llvm::BasicBlock *ContBB =
740       CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
741 
742   auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
743   // If an unsupported sync scope is encountered at run time, assume a
744   // fallback sync scope value.
745   auto FallBack = ScopeModel->getFallBackValue();
746   llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
747   for (auto S : Scopes) {
748     auto *B = BB[S];
749     if (S != FallBack)
750       SI->addCase(Builder.getInt32(S), B);
751 
752     Builder.SetInsertPoint(B);
753     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
754                  Order,
755                  CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
756                                                          ScopeModel->map(S),
757                                                          Order,
758                                                          CGF.getLLVMContext()));
759     Builder.CreateBr(ContBB);
760   }
761 
762   Builder.SetInsertPoint(ContBB);
763 }
764 
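// Add the value operand to a libcall argument list: optimized libcalls take
// the value itself, loaded as an appropriately sized integer, while the
// generic libcalls take a pointer to it.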
765 static void
766 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
767                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
768                   SourceLocation Loc, CharUnits SizeInChars) {
769   if (UseOptimizedLibcall) {
770     // Load value and pass it to the function directly.
771     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
772     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
773     ValTy =
774         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
775     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
776                                                 SizeInBits)->getPointerTo();
777     Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
778     Val = CGF.EmitLoadOfScalar(Ptr, false,
779                                CGF.getContext().getPointerType(ValTy),
780                                Loc);
781     // Coerce the value into an appropriately sized integer type.
782     Args.add(RValue::get(Val), ValTy);
783   } else {
784     // Non-optimized functions always take a reference.
785     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
786                          CGF.getContext().VoidPtrTy);
787   }
788 }
789 
790 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
791   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
792   QualType MemTy = AtomicTy;
793   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
794     MemTy = AT->getValueType();
795   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
796 
797   Address Val1 = Address::invalid();
798   Address Val2 = Address::invalid();
799   Address Dest = Address::invalid();
800   Address Ptr = EmitPointerWithAlignment(E->getPtr());
801 
802   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
803       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
804     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
805     EmitAtomicInit(E->getVal1(), lvalue);
806     return RValue::get(nullptr);
807   }
808 
809   CharUnits sizeChars, alignChars;
810   std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
811   uint64_t Size = sizeChars.getQuantity();
812   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
813 
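  // The operation can be inlined only if it is no wider than the target's
  // maximum inline atomic width and the pointer is suitably aligned;
  // otherwise emit a call into the atomic runtime library.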
814   bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
815   bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
816   bool UseLibcall = Misaligned | Oversized;
817   CharUnits MaxInlineWidth =
818       getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
819 
820   DiagnosticsEngine &Diags = CGM.getDiags();
821 
822   if (Misaligned) {
823     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
824         << (int)sizeChars.getQuantity()
825         << (int)Ptr.getAlignment().getQuantity();
826   }
827 
828   if (Oversized) {
829     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
830         << (int)sizeChars.getQuantity() << (int)MaxInlineWidth.getQuantity();
831   }
832 
833   llvm::Value *Order = EmitScalarExpr(E->getOrder());
834   llvm::Value *Scope =
835       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
836 
837   switch (E->getOp()) {
838   case AtomicExpr::AO__c11_atomic_init:
839   case AtomicExpr::AO__opencl_atomic_init:
840     llvm_unreachable("Already handled above with EmitAtomicInit!");
841 
842   case AtomicExpr::AO__c11_atomic_load:
843   case AtomicExpr::AO__opencl_atomic_load:
844   case AtomicExpr::AO__atomic_load_n:
845     break;
846 
847   case AtomicExpr::AO__atomic_load:
848     Dest = EmitPointerWithAlignment(E->getVal1());
849     break;
850 
851   case AtomicExpr::AO__atomic_store:
852     Val1 = EmitPointerWithAlignment(E->getVal1());
853     break;
854 
855   case AtomicExpr::AO__atomic_exchange:
856     Val1 = EmitPointerWithAlignment(E->getVal1());
857     Dest = EmitPointerWithAlignment(E->getVal2());
858     break;
859 
860   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
861   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
862   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
863   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
864   case AtomicExpr::AO__atomic_compare_exchange_n:
865   case AtomicExpr::AO__atomic_compare_exchange:
866     Val1 = EmitPointerWithAlignment(E->getVal1());
867     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
868       Val2 = EmitPointerWithAlignment(E->getVal2());
869     else
870       Val2 = EmitValToTemp(*this, E->getVal2());
871     OrderFail = EmitScalarExpr(E->getOrderFail());
872     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
873         E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
874       IsWeak = EmitScalarExpr(E->getWeak());
875     break;
876 
877   case AtomicExpr::AO__c11_atomic_fetch_add:
878   case AtomicExpr::AO__c11_atomic_fetch_sub:
879   case AtomicExpr::AO__opencl_atomic_fetch_add:
880   case AtomicExpr::AO__opencl_atomic_fetch_sub:
881     if (MemTy->isPointerType()) {
882       // For pointer arithmetic, we're required to do a bit of math:
883       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
884       // ... but only for the C11 builtins. The GNU builtins expect the
885       // user to multiply by sizeof(T).
886       QualType Val1Ty = E->getVal1()->getType();
887       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
888       CharUnits PointeeIncAmt =
889           getContext().getTypeSizeInChars(MemTy->getPointeeType());
890       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
891       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
892       Val1 = Temp;
893       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
894       break;
895     }
896       LLVM_FALLTHROUGH;
897   case AtomicExpr::AO__atomic_fetch_add:
898   case AtomicExpr::AO__atomic_fetch_sub:
899   case AtomicExpr::AO__atomic_add_fetch:
900   case AtomicExpr::AO__atomic_sub_fetch:
901   case AtomicExpr::AO__c11_atomic_store:
902   case AtomicExpr::AO__c11_atomic_exchange:
903   case AtomicExpr::AO__opencl_atomic_store:
904   case AtomicExpr::AO__opencl_atomic_exchange:
905   case AtomicExpr::AO__atomic_store_n:
906   case AtomicExpr::AO__atomic_exchange_n:
907   case AtomicExpr::AO__c11_atomic_fetch_and:
908   case AtomicExpr::AO__c11_atomic_fetch_or:
909   case AtomicExpr::AO__c11_atomic_fetch_xor:
910   case AtomicExpr::AO__c11_atomic_fetch_max:
911   case AtomicExpr::AO__c11_atomic_fetch_min:
912   case AtomicExpr::AO__opencl_atomic_fetch_and:
913   case AtomicExpr::AO__opencl_atomic_fetch_or:
914   case AtomicExpr::AO__opencl_atomic_fetch_xor:
915   case AtomicExpr::AO__opencl_atomic_fetch_min:
916   case AtomicExpr::AO__opencl_atomic_fetch_max:
917   case AtomicExpr::AO__atomic_fetch_and:
918   case AtomicExpr::AO__atomic_fetch_or:
919   case AtomicExpr::AO__atomic_fetch_xor:
920   case AtomicExpr::AO__atomic_fetch_nand:
921   case AtomicExpr::AO__atomic_and_fetch:
922   case AtomicExpr::AO__atomic_or_fetch:
923   case AtomicExpr::AO__atomic_xor_fetch:
924   case AtomicExpr::AO__atomic_nand_fetch:
925   case AtomicExpr::AO__atomic_max_fetch:
926   case AtomicExpr::AO__atomic_min_fetch:
927   case AtomicExpr::AO__atomic_fetch_max:
928   case AtomicExpr::AO__atomic_fetch_min:
929     Val1 = EmitValToTemp(*this, E->getVal1());
930     break;
931   }
932 
933   QualType RValTy = E->getType().getUnqualifiedType();
934 
935   // The inlined atomics only operate on iN types, where N is a power of 2. We
936   // need to make sure (via temporaries if necessary) that all incoming values
937   // are compatible.
938   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
939   AtomicInfo Atomics(*this, AtomicVal);
940 
941   Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
942   if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
943   if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
944   if (Dest.isValid())
945     Dest = Atomics.emitCastToAtomicIntPointer(Dest);
946   else if (E->isCmpXChg())
947     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
948   else if (!RValTy->isVoidType())
949     Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
950 
951   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
952   if (UseLibcall) {
953     bool UseOptimizedLibcall = false;
954     switch (E->getOp()) {
955     case AtomicExpr::AO__c11_atomic_init:
956     case AtomicExpr::AO__opencl_atomic_init:
957       llvm_unreachable("Already handled above with EmitAtomicInit!");
958 
959     case AtomicExpr::AO__c11_atomic_fetch_add:
960     case AtomicExpr::AO__opencl_atomic_fetch_add:
961     case AtomicExpr::AO__atomic_fetch_add:
962     case AtomicExpr::AO__c11_atomic_fetch_and:
963     case AtomicExpr::AO__opencl_atomic_fetch_and:
964     case AtomicExpr::AO__atomic_fetch_and:
965     case AtomicExpr::AO__c11_atomic_fetch_or:
966     case AtomicExpr::AO__opencl_atomic_fetch_or:
967     case AtomicExpr::AO__atomic_fetch_or:
968     case AtomicExpr::AO__atomic_fetch_nand:
969     case AtomicExpr::AO__c11_atomic_fetch_sub:
970     case AtomicExpr::AO__opencl_atomic_fetch_sub:
971     case AtomicExpr::AO__atomic_fetch_sub:
972     case AtomicExpr::AO__c11_atomic_fetch_xor:
973     case AtomicExpr::AO__opencl_atomic_fetch_xor:
974     case AtomicExpr::AO__opencl_atomic_fetch_min:
975     case AtomicExpr::AO__opencl_atomic_fetch_max:
976     case AtomicExpr::AO__atomic_fetch_xor:
977     case AtomicExpr::AO__c11_atomic_fetch_max:
978     case AtomicExpr::AO__c11_atomic_fetch_min:
979     case AtomicExpr::AO__atomic_add_fetch:
980     case AtomicExpr::AO__atomic_and_fetch:
981     case AtomicExpr::AO__atomic_nand_fetch:
982     case AtomicExpr::AO__atomic_or_fetch:
983     case AtomicExpr::AO__atomic_sub_fetch:
984     case AtomicExpr::AO__atomic_xor_fetch:
985     case AtomicExpr::AO__atomic_fetch_max:
986     case AtomicExpr::AO__atomic_fetch_min:
987     case AtomicExpr::AO__atomic_max_fetch:
988     case AtomicExpr::AO__atomic_min_fetch:
989       // For these, only library calls for certain sizes exist.
990       UseOptimizedLibcall = true;
991       break;
992 
993     case AtomicExpr::AO__atomic_load:
994     case AtomicExpr::AO__atomic_store:
995     case AtomicExpr::AO__atomic_exchange:
996     case AtomicExpr::AO__atomic_compare_exchange:
997       // Use the generic version if we don't know that the operand will be
998       // suitably aligned for the optimized version.
999       if (Misaligned)
1000         break;
1001       LLVM_FALLTHROUGH;
1002     case AtomicExpr::AO__c11_atomic_load:
1003     case AtomicExpr::AO__c11_atomic_store:
1004     case AtomicExpr::AO__c11_atomic_exchange:
1005     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1006     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1007     case AtomicExpr::AO__opencl_atomic_load:
1008     case AtomicExpr::AO__opencl_atomic_store:
1009     case AtomicExpr::AO__opencl_atomic_exchange:
1010     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1011     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1012     case AtomicExpr::AO__atomic_load_n:
1013     case AtomicExpr::AO__atomic_store_n:
1014     case AtomicExpr::AO__atomic_exchange_n:
1015     case AtomicExpr::AO__atomic_compare_exchange_n:
1016       // Only use optimized library calls for sizes for which they exist.
1017       // FIXME: Size == 16 optimized library functions exist too.
1018       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
1019         UseOptimizedLibcall = true;
1020       break;
1021     }
1022 
1023     CallArgList Args;
1024     if (!UseOptimizedLibcall) {
1025       // For non-optimized library calls, the size is the first parameter
1026       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1027                getContext().getSizeType());
1028     }
1029     // The atomic address is the first or second parameter.
1030     // The OpenCL atomic library functions only accept pointer arguments to the
1031     // generic address space.
1032     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1033       if (!E->isOpenCL())
1034         return V;
1035       auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1036       if (AS == LangAS::opencl_generic)
1037         return V;
1038       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1039       auto T = V->getType();
1040       auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
1041 
1042       return getTargetHooks().performAddrSpaceCast(
1043           *this, V, AS, LangAS::opencl_generic, DestType, false);
1044     };
1045 
1046     Args.add(RValue::get(CastToGenericAddrSpace(
1047                  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
1048              getContext().VoidPtrTy);
1049 
1050     std::string LibCallName;
1051     QualType LoweredMemTy =
1052       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
1053     QualType RetTy;
1054     bool HaveRetTy = false;
1055     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
1056     bool PostOpMinMax = false;
1057     switch (E->getOp()) {
1058     case AtomicExpr::AO__c11_atomic_init:
1059     case AtomicExpr::AO__opencl_atomic_init:
1060       llvm_unreachable("Already handled!");
1061 
1062     // There is only one libcall for compare and exchange, because there is no
1063     // optimisation benefit possible from a libcall version of a weak compare
1064     // and exchange.
1065     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1066     //                                void *desired, int success, int failure)
1067     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1068     //                                  int success, int failure)
1069     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1070     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1071     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1072     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1073     case AtomicExpr::AO__atomic_compare_exchange:
1074     case AtomicExpr::AO__atomic_compare_exchange_n:
1075       LibCallName = "__atomic_compare_exchange";
1076       RetTy = getContext().BoolTy;
1077       HaveRetTy = true;
1078       Args.add(
1079           RValue::get(CastToGenericAddrSpace(
1080               EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1081           getContext().VoidPtrTy);
1082       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1083                         MemTy, E->getExprLoc(), sizeChars);
1084       Args.add(RValue::get(Order), getContext().IntTy);
1085       Order = OrderFail;
1086       break;
1087     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1088     //                        int order)
1089     // T __atomic_exchange_N(T *mem, T val, int order)
1090     case AtomicExpr::AO__c11_atomic_exchange:
1091     case AtomicExpr::AO__opencl_atomic_exchange:
1092     case AtomicExpr::AO__atomic_exchange_n:
1093     case AtomicExpr::AO__atomic_exchange:
1094       LibCallName = "__atomic_exchange";
1095       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1096                         MemTy, E->getExprLoc(), sizeChars);
1097       break;
1098     // void __atomic_store(size_t size, void *mem, void *val, int order)
1099     // void __atomic_store_N(T *mem, T val, int order)
1100     case AtomicExpr::AO__c11_atomic_store:
1101     case AtomicExpr::AO__opencl_atomic_store:
1102     case AtomicExpr::AO__atomic_store:
1103     case AtomicExpr::AO__atomic_store_n:
1104       LibCallName = "__atomic_store";
1105       RetTy = getContext().VoidTy;
1106       HaveRetTy = true;
1107       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1108                         MemTy, E->getExprLoc(), sizeChars);
1109       break;
1110     // void __atomic_load(size_t size, void *mem, void *return, int order)
1111     // T __atomic_load_N(T *mem, int order)
1112     case AtomicExpr::AO__c11_atomic_load:
1113     case AtomicExpr::AO__opencl_atomic_load:
1114     case AtomicExpr::AO__atomic_load:
1115     case AtomicExpr::AO__atomic_load_n:
1116       LibCallName = "__atomic_load";
1117       break;
1118     // T __atomic_add_fetch_N(T *mem, T val, int order)
1119     // T __atomic_fetch_add_N(T *mem, T val, int order)
1120     case AtomicExpr::AO__atomic_add_fetch:
1121       PostOp = llvm::Instruction::Add;
1122       LLVM_FALLTHROUGH;
1123     case AtomicExpr::AO__c11_atomic_fetch_add:
1124     case AtomicExpr::AO__opencl_atomic_fetch_add:
1125     case AtomicExpr::AO__atomic_fetch_add:
1126       LibCallName = "__atomic_fetch_add";
1127       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1128                         LoweredMemTy, E->getExprLoc(), sizeChars);
1129       break;
1130     // T __atomic_and_fetch_N(T *mem, T val, int order)
1131     // T __atomic_fetch_and_N(T *mem, T val, int order)
1132     case AtomicExpr::AO__atomic_and_fetch:
1133       PostOp = llvm::Instruction::And;
1134       LLVM_FALLTHROUGH;
1135     case AtomicExpr::AO__c11_atomic_fetch_and:
1136     case AtomicExpr::AO__opencl_atomic_fetch_and:
1137     case AtomicExpr::AO__atomic_fetch_and:
1138       LibCallName = "__atomic_fetch_and";
1139       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1140                         MemTy, E->getExprLoc(), sizeChars);
1141       break;
1142     // T __atomic_or_fetch_N(T *mem, T val, int order)
1143     // T __atomic_fetch_or_N(T *mem, T val, int order)
1144     case AtomicExpr::AO__atomic_or_fetch:
1145       PostOp = llvm::Instruction::Or;
1146       LLVM_FALLTHROUGH;
1147     case AtomicExpr::AO__c11_atomic_fetch_or:
1148     case AtomicExpr::AO__opencl_atomic_fetch_or:
1149     case AtomicExpr::AO__atomic_fetch_or:
1150       LibCallName = "__atomic_fetch_or";
1151       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1152                         MemTy, E->getExprLoc(), sizeChars);
1153       break;
1154     // T __atomic_sub_fetch_N(T *mem, T val, int order)
1155     // T __atomic_fetch_sub_N(T *mem, T val, int order)
1156     case AtomicExpr::AO__atomic_sub_fetch:
1157       PostOp = llvm::Instruction::Sub;
1158       LLVM_FALLTHROUGH;
1159     case AtomicExpr::AO__c11_atomic_fetch_sub:
1160     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1161     case AtomicExpr::AO__atomic_fetch_sub:
1162       LibCallName = "__atomic_fetch_sub";
1163       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1164                         LoweredMemTy, E->getExprLoc(), sizeChars);
1165       break;
1166     // T __atomic_xor_fetch_N(T *mem, T val, int order)
1167     // T __atomic_fetch_xor_N(T *mem, T val, int order)
1168     case AtomicExpr::AO__atomic_xor_fetch:
1169       PostOp = llvm::Instruction::Xor;
1170       LLVM_FALLTHROUGH;
1171     case AtomicExpr::AO__c11_atomic_fetch_xor:
1172     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1173     case AtomicExpr::AO__atomic_fetch_xor:
1174       LibCallName = "__atomic_fetch_xor";
1175       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1176                         MemTy, E->getExprLoc(), sizeChars);
1177       break;
1178     case AtomicExpr::AO__atomic_min_fetch:
1179       PostOpMinMax = true;
1180       LLVM_FALLTHROUGH;
1181     case AtomicExpr::AO__c11_atomic_fetch_min:
1182     case AtomicExpr::AO__atomic_fetch_min:
1183     case AtomicExpr::AO__opencl_atomic_fetch_min:
1184       LibCallName = E->getValueType()->isSignedIntegerType()
1185                         ? "__atomic_fetch_min"
1186                         : "__atomic_fetch_umin";
1187       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1188                         LoweredMemTy, E->getExprLoc(), sizeChars);
1189       break;
1190     case AtomicExpr::AO__atomic_max_fetch:
1191       PostOpMinMax = true;
1192       LLVM_FALLTHROUGH;
1193     case AtomicExpr::AO__c11_atomic_fetch_max:
1194     case AtomicExpr::AO__atomic_fetch_max:
1195     case AtomicExpr::AO__opencl_atomic_fetch_max:
1196       LibCallName = E->getValueType()->isSignedIntegerType()
1197                         ? "__atomic_fetch_max"
1198                         : "__atomic_fetch_umax";
1199       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1200                         LoweredMemTy, E->getExprLoc(), sizeChars);
1201       break;
1202     // T __atomic_nand_fetch_N(T *mem, T val, int order)
1203     // T __atomic_fetch_nand_N(T *mem, T val, int order)
1204     case AtomicExpr::AO__atomic_nand_fetch:
1205       PostOp = llvm::Instruction::And; // the NOT is special cased below
1206       LLVM_FALLTHROUGH;
1207     case AtomicExpr::AO__atomic_fetch_nand:
1208       LibCallName = "__atomic_fetch_nand";
1209       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1210                         MemTy, E->getExprLoc(), sizeChars);
1211       break;
1212     }
1213 
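    // OpenCL builtins use the __opencl_atomic_* flavour of the library
    // entry points.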
1214     if (E->isOpenCL()) {
1215       LibCallName = std::string("__opencl") +
1216           StringRef(LibCallName).drop_front(1).str();
1218     }
1219     // Optimized functions have the size in their name.
1220     if (UseOptimizedLibcall)
1221       LibCallName += "_" + llvm::utostr(Size);
1222     // By default, assume we return a value of the atomic type.
1223     if (!HaveRetTy) {
1224       if (UseOptimizedLibcall) {
1225         // Value is returned directly.
1226         // The function returns an appropriately sized integer type.
1227         RetTy = getContext().getIntTypeForBitwidth(
1228             getContext().toBits(sizeChars), /*Signed=*/false);
1229       } else {
1230         // Value is returned through parameter before the order.
1231         RetTy = getContext().VoidTy;
1232         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1233                  getContext().VoidPtrTy);
1234       }
1235     }
1236     // order is always the last parameter
1237     Args.add(RValue::get(Order),
1238              getContext().IntTy);
1239     if (E->isOpenCL())
1240       Args.add(RValue::get(Scope), getContext().IntTy);
1241 
    // PostOp and PostOpMinMax are only needed for the atomic_*_fetch
    // operations, and are therefore only needed for (and implemented in) the
    // UseOptimizedLibcall codepath.
1245     assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
1246 
1247     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // For compare-exchange, the boolean success result is returned directly
    // from the libcall.
1249     if (E->isCmpXChg())
1250       return Res;
1251 
1252     // The value is returned directly for optimized libcalls but the expr
1253     // provided an out-param.
1254     if (UseOptimizedLibcall && Res.getScalarVal()) {
1255       llvm::Value *ResVal = Res.getScalarVal();
1256       if (PostOpMinMax) {
1257         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1258         ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
1259                                       E->getValueType()->isSignedIntegerType(),
1260                                       ResVal, LoadVal1);
1261       } else if (PostOp) {
1262         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1263         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1264       }
1265       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1266         ResVal = Builder.CreateNot(ResVal);
1267 
1268       Builder.CreateStore(
1269           ResVal,
1270           Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1271     }
1272 
1273     if (RValTy->isVoidType())
1274       return RValue::get(nullptr);
1275 
1276     return convertTempToRValue(
1277         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1278         RValTy, E->getExprLoc());
1279   }
1280 
1281   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1282                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1283                  E->getOp() == AtomicExpr::AO__atomic_store ||
1284                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1285   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1286                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1287                 E->getOp() == AtomicExpr::AO__atomic_load ||
1288                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1289 
1290   if (isa<llvm::ConstantInt>(Order)) {
1291     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1292     // We should not ever get to a case where the ordering isn't a valid C ABI
1293     // value, but it's hard to enforce that in general.
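    // The C ABI encodes the orderings as relaxed=0, consume=1, acquire=2,
    // release=3, acq_rel=4 and seq_cst=5; llvm::AtomicOrderingCABI mirrors
    // this numbering.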
1294     if (llvm::isValidAtomicOrderingCABI(ord))
1295       switch ((llvm::AtomicOrderingCABI)ord) {
1296       case llvm::AtomicOrderingCABI::relaxed:
1297         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1298                      llvm::AtomicOrdering::Monotonic, Scope);
1299         break;
1300       case llvm::AtomicOrderingCABI::consume:
1301       case llvm::AtomicOrderingCABI::acquire:
1302         if (IsStore)
1303           break; // Avoid crashing on code with undefined behavior
1304         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1305                      llvm::AtomicOrdering::Acquire, Scope);
1306         break;
1307       case llvm::AtomicOrderingCABI::release:
1308         if (IsLoad)
1309           break; // Avoid crashing on code with undefined behavior
1310         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1311                      llvm::AtomicOrdering::Release, Scope);
1312         break;
1313       case llvm::AtomicOrderingCABI::acq_rel:
1314         if (IsLoad || IsStore)
1315           break; // Avoid crashing on code with undefined behavior
1316         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1317                      llvm::AtomicOrdering::AcquireRelease, Scope);
1318         break;
1319       case llvm::AtomicOrderingCABI::seq_cst:
1320         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1321                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1322         break;
1323       }
1324     if (RValTy->isVoidType())
1325       return RValue::get(nullptr);
1326 
1327     return convertTempToRValue(
1328         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1329                                         Dest.getAddressSpace())),
1330         RValTy, E->getExprLoc());
1331   }
1332 
1333   // Long case, when Order isn't obviously constant.
1334 
  // Create all the relevant basic blocks.
1336   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1337                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1338                    *SeqCstBB = nullptr;
1339   MonotonicBB = createBasicBlock("monotonic", CurFn);
1340   if (!IsStore)
1341     AcquireBB = createBasicBlock("acquire", CurFn);
1342   if (!IsLoad)
1343     ReleaseBB = createBasicBlock("release", CurFn);
1344   if (!IsLoad && !IsStore)
1345     AcqRelBB = createBasicBlock("acqrel", CurFn);
1346   SeqCstBB = createBasicBlock("seqcst", CurFn);
1347   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1348 
1349   // Create the switch for the split
1350   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1351   // doesn't matter unless someone is crazy enough to use something that
1352   // doesn't fold to a constant for the ordering.
1353   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1354   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1355 
1356   // Emit all the different atomics
1357   Builder.SetInsertPoint(MonotonicBB);
1358   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1359                llvm::AtomicOrdering::Monotonic, Scope);
1360   Builder.CreateBr(ContBB);
1361   if (!IsStore) {
1362     Builder.SetInsertPoint(AcquireBB);
1363     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1364                  llvm::AtomicOrdering::Acquire, Scope);
1365     Builder.CreateBr(ContBB);
1366     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1367                 AcquireBB);
1368     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1369                 AcquireBB);
1370   }
1371   if (!IsLoad) {
1372     Builder.SetInsertPoint(ReleaseBB);
1373     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1374                  llvm::AtomicOrdering::Release, Scope);
1375     Builder.CreateBr(ContBB);
1376     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1377                 ReleaseBB);
1378   }
1379   if (!IsLoad && !IsStore) {
1380     Builder.SetInsertPoint(AcqRelBB);
1381     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1382                  llvm::AtomicOrdering::AcquireRelease, Scope);
1383     Builder.CreateBr(ContBB);
1384     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1385                 AcqRelBB);
1386   }
1387   Builder.SetInsertPoint(SeqCstBB);
1388   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1389                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1390   Builder.CreateBr(ContBB);
1391   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1392               SeqCstBB);
1393 
1394   // Cleanup and return
1395   Builder.SetInsertPoint(ContBB);
1396   if (RValTy->isVoidType())
1397     return RValue::get(nullptr);
1398 
1399   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1400   return convertTempToRValue(
1401       Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1402                                       Dest.getAddressSpace())),
1403       RValTy, E->getExprLoc());
1404 }
1405 
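/// Cast the given address to a pointer to an integer type whose width matches
/// the size of the atomic, so the memory can be accessed with integer atomic
/// instructions and libcalls.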
1406 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1407   unsigned addrspace =
1408     cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1409   llvm::IntegerType *ty =
1410     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1411   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1412 }
1413 
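/// Like emitCastToAtomicIntPointer, but first copies the value into a fresh
/// temporary whenever its in-memory size differs from the atomic width, so the
/// resulting pointer always refers to a full atomic-sized object.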
1414 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1415   llvm::Type *Ty = Addr.getElementType();
1416   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1417   if (SourceSizeInBits != AtomicSizeInBits) {
1418     Address Tmp = CreateTempAlloca();
1419     CGF.Builder.CreateMemCpy(Tmp, Addr,
1420                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1421     Addr = Tmp;
1422   }
1423 
1424   return emitCastToAtomicIntPointer(Addr);
1425 }
1426 
1427 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1428                                              AggValueSlot resultSlot,
1429                                              SourceLocation loc,
1430                                              bool asValue) const {
1431   if (LVal.isSimple()) {
1432     if (EvaluationKind == TEK_Aggregate)
1433       return resultSlot.asRValue();
1434 
1435     // Drill into the padding structure if we have one.
1436     if (hasPadding())
1437       addr = CGF.Builder.CreateStructGEP(addr, 0);
1438 
1439     // Otherwise, just convert the temporary to an r-value using the
1440     // normal conversion routine.
1441     return CGF.convertTempToRValue(addr, getValueType(), loc);
1442   }
1443   if (!asValue)
1444     // Get RValue from temp memory as atomic for non-simple lvalues
1445     return RValue::get(CGF.Builder.CreateLoad(addr));
1446   if (LVal.isBitField())
1447     return CGF.EmitLoadOfBitfieldLValue(
1448         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1449                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1450   if (LVal.isVectorElt())
1451     return CGF.EmitLoadOfLValue(
1452         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1453                               LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1454   assert(LVal.isExtVectorElt());
1455   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1456       addr, LVal.getExtVectorElts(), LVal.getType(),
1457       LVal.getBaseInfo(), TBAAAccessInfo()));
1458 }
1459 
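/// Convert a value loaded in the atomic's integer representation back into an
/// RValue of the original value type (or of the whole atomic type for
/// non-simple lvalues), going through a temporary only when a direct cast from
/// the integer is not possible.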
1460 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1461                                              AggValueSlot ResultSlot,
1462                                              SourceLocation Loc,
1463                                              bool AsValue) const {
  // Try to avoid going through memory in some easy cases.
1465   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1466   if (getEvaluationKind() == TEK_Scalar &&
1467       (((!LVal.isBitField() ||
1468          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1469         !hasPadding()) ||
1470        !AsValue)) {
1471     auto *ValTy = AsValue
1472                       ? CGF.ConvertTypeForMem(ValueTy)
1473                       : getAtomicAddress().getType()->getPointerElementType();
1474     if (ValTy->isIntegerTy()) {
1475       assert(IntVal->getType() == ValTy && "Different integer types.");
1476       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1477     } else if (ValTy->isPointerTy())
1478       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1479     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1480       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1481   }
1482 
1483   // Create a temporary.  This needs to be big enough to hold the
1484   // atomic integer.
1485   Address Temp = Address::invalid();
1486   bool TempIsVolatile = false;
1487   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1488     assert(!ResultSlot.isIgnored());
1489     Temp = ResultSlot.getAddress();
1490     TempIsVolatile = ResultSlot.isVolatile();
1491   } else {
1492     Temp = CreateTempAlloca();
1493   }
1494 
1495   // Slam the integer into the temporary.
1496   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1497   CGF.Builder.CreateStore(IntVal, CastTemp)
1498       ->setVolatile(TempIsVolatile);
1499 
1500   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1501 }
1502 
1503 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1504                                        llvm::AtomicOrdering AO, bool) {
1505   // void __atomic_load(size_t size, void *mem, void *return, int order);
1506   CallArgList Args;
1507   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1508   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1509            CGF.getContext().VoidPtrTy);
1510   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1511            CGF.getContext().VoidPtrTy);
1512   Args.add(
1513       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1514       CGF.getContext().IntTy);
1515   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1516 }
1517 
1518 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1519                                           bool IsVolatile) {
1520   // Okay, we're doing this natively.
1521   Address Addr = getAtomicAddressAsAtomicIntPointer();
1522   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1523   Load->setAtomic(AO);
1524 
1525   // Other decoration.
1526   if (IsVolatile)
1527     Load->setVolatile(true);
1528   CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1529   return Load;
1530 }
1531 
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
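/// For example, under /volatile:ms a volatile int that fits in a pointer is
/// loaded with acquire semantics and stored with release semantics.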
1535 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1536   if (!CGM.getCodeGenOpts().MSVolatile) return false;
1537   AtomicInfo AI(*this, LV);
1538   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1539   // An atomic is inline if we don't need to use a libcall.
1540   bool AtomicIsInline = !AI.shouldUseLibcall();
1541   // MSVC doesn't seem to do this for types wider than a pointer.
1542   if (getContext().getTypeSize(LV.getType()) >
1543       getContext().getTypeSize(getContext().getIntPtrType()))
1544     return false;
1545   return IsVolatile && AtomicIsInline;
1546 }
1547 
1548 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1549                                        AggValueSlot Slot) {
1550   llvm::AtomicOrdering AO;
1551   bool IsVolatile = LV.isVolatileQualified();
1552   if (LV.getType()->isAtomicType()) {
1553     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1554   } else {
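    // Non-atomic types are expected here only via the MS volatile path; give
    // the load acquire semantics.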
1555     AO = llvm::AtomicOrdering::Acquire;
1556     IsVolatile = true;
1557   }
1558   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1559 }
1560 
1561 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1562                                   bool AsValue, llvm::AtomicOrdering AO,
1563                                   bool IsVolatile) {
1564   // Check whether we should use a library call.
1565   if (shouldUseLibcall()) {
1566     Address TempAddr = Address::invalid();
1567     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1568       assert(getEvaluationKind() == TEK_Aggregate);
1569       TempAddr = ResultSlot.getAddress();
1570     } else
1571       TempAddr = CreateTempAlloca();
1572 
1573     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1574 
1575     // Okay, turn that back into the original value or whole atomic (for
1576     // non-simple lvalues) type.
1577     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1578   }
1579 
1580   // Okay, we're doing this natively.
1581   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1582 
1583   // If we're ignoring an aggregate return, don't do anything.
1584   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1585     return RValue::getAggregate(Address::invalid(), false);
1586 
1587   // Okay, turn that back into the original value or atomic (for non-simple
1588   // lvalues) type.
1589   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1590 }
1591 
1592 /// Emit a load from an l-value of atomic type.  Note that the r-value
1593 /// we produce is an r-value of the atomic *value* type.
1594 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1595                                        llvm::AtomicOrdering AO, bool IsVolatile,
1596                                        AggValueSlot resultSlot) {
1597   AtomicInfo Atomics(*this, src);
1598   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1599                                 IsVolatile);
1600 }
1601 
1602 /// Copy an r-value into memory as part of storing to an atomic type.
1603 /// This needs to create a bit-pattern suitable for atomic operations.
1604 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1605   assert(LVal.isSimple());
1606   // If we have an r-value, the rvalue should be of the atomic type,
1607   // which means that the caller is responsible for having zeroed
1608   // any padding.  Just do an aggregate copy of that type.
1609   if (rvalue.isAggregate()) {
1610     LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1611     LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1612                                     getAtomicType());
1613     bool IsVolatile = rvalue.isVolatileQualified() ||
1614                       LVal.isVolatileQualified();
1615     CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1616                           AggValueSlot::DoesNotOverlap, IsVolatile);
1617     return;
1618   }
1619 
  // Okay, otherwise we're copying a scalar or complex value into the buffer.
1621 
1622   // Zero out the buffer if necessary.
1623   emitMemSetZeroIfNecessary();
1624 
1625   // Drill past the padding if present.
1626   LValue TempLVal = projectValue();
1627 
1628   // Okay, store the rvalue in.
1629   if (rvalue.isScalar()) {
1630     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1631   } else {
1632     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1633   }
1634 }
1635 
1636 
1637 /// Materialize an r-value into memory for the purposes of storing it
1638 /// to an atomic type.
1639 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1640   // Aggregate r-values are already in memory, and EmitAtomicStore
1641   // requires them to be values of the atomic type.
1642   if (rvalue.isAggregate())
1643     return rvalue.getAggregateAddress();
1644 
1645   // Otherwise, make a temporary and materialize into it.
1646   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1647   AtomicInfo Atomics(CGF, TempLV);
1648   Atomics.emitCopyIntoMemory(rvalue);
1649   return TempLV.getAddress(CGF);
1650 }
1651 
1652 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1653   // If we've got a scalar value of the right size, try to avoid going
1654   // through memory.
1655   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1656     llvm::Value *Value = RVal.getScalarVal();
1657     if (isa<llvm::IntegerType>(Value->getType()))
1658       return CGF.EmitToMemory(Value, ValueTy);
1659     else {
1660       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1661           CGF.getLLVMContext(),
1662           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1663       if (isa<llvm::PointerType>(Value->getType()))
1664         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1665       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1666         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1667     }
1668   }
1669   // Otherwise, we need to go through memory.
1670   // Put the r-value in memory.
1671   Address Addr = materializeRValue(RVal);
1672 
1673   // Cast the temporary to the atomic int type and pull a value out.
1674   Addr = emitCastToAtomicIntPointer(Addr);
1675   return CGF.Builder.CreateLoad(Addr);
1676 }
1677 
1678 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1679     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1680     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-exchange.
1682   Address Addr = getAtomicAddressAsAtomicIntPointer();
1683   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1684                                                ExpectedVal, DesiredVal,
1685                                                Success, Failure);
1686   // Other decoration.
1687   Inst->setVolatile(LVal.isVolatileQualified());
1688   Inst->setWeak(IsWeak);
1689 
1690   // Okay, turn that back into the original value type.
1691   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1692   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1693   return std::make_pair(PreviousVal, SuccessFailureVal);
1694 }
1695 
1696 llvm::Value *
1697 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1698                                              llvm::Value *DesiredAddr,
1699                                              llvm::AtomicOrdering Success,
1700                                              llvm::AtomicOrdering Failure) {
1701   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1702   // void *desired, int success, int failure);
1703   CallArgList Args;
1704   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1705   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1706            CGF.getContext().VoidPtrTy);
1707   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1708            CGF.getContext().VoidPtrTy);
1709   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1710            CGF.getContext().VoidPtrTy);
1711   Args.add(RValue::get(
1712                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1713            CGF.getContext().IntTy);
1714   Args.add(RValue::get(
1715                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1716            CGF.getContext().IntTy);
1717   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1718                                               CGF.getContext().BoolTy, Args);
1719 
1720   return SuccessFailureRVal.getScalarVal();
1721 }
1722 
1723 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1724     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1725     llvm::AtomicOrdering Failure, bool IsWeak) {
1726   if (isStrongerThan(Failure, Success))
    // Don't assert on undefined behavior ("the failure argument shall be no
    // stronger than the success argument"); silently weaken the failure
    // ordering instead.
1729     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1730 
1731   // Check whether we should use a library call.
1732   if (shouldUseLibcall()) {
1733     // Produce a source address.
1734     Address ExpectedAddr = materializeRValue(Expected);
1735     Address DesiredAddr = materializeRValue(Desired);
1736     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1737                                                  DesiredAddr.getPointer(),
1738                                                  Success, Failure);
1739     return std::make_pair(
1740         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1741                                   SourceLocation(), /*AsValue=*/false),
1742         Res);
1743   }
1744 
1745   // If we've got a scalar value of the right size, try to avoid going
1746   // through memory.
1747   auto *ExpectedVal = convertRValueToInt(Expected);
1748   auto *DesiredVal = convertRValueToInt(Desired);
1749   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1750                                          Failure, IsWeak);
1751   return std::make_pair(
1752       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1753                                 SourceLocation(), /*AsValue=*/false),
1754       Res.second);
1755 }
1756 
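/// Apply \p UpdateOp to the previously loaded value \p OldRVal and store the
/// result into \p DesiredAddr. For non-simple (bit-field, vector-element or
/// ext-vector-element) atomic l-values, the old value is reloaded and the new
/// value stored through the corresponding projected l-values.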
1757 static void
1758 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1759                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1760                       Address DesiredAddr) {
1761   RValue UpRVal;
1762   LValue AtomicLVal = Atomics.getAtomicLValue();
1763   LValue DesiredLVal;
1764   if (AtomicLVal.isSimple()) {
1765     UpRVal = OldRVal;
1766     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1767   } else {
1768     // Build new lvalue for temp address.
1769     Address Ptr = Atomics.materializeRValue(OldRVal);
1770     LValue UpdateLVal;
1771     if (AtomicLVal.isBitField()) {
1772       UpdateLVal =
1773           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1774                                AtomicLVal.getType(),
1775                                AtomicLVal.getBaseInfo(),
1776                                AtomicLVal.getTBAAInfo());
1777       DesiredLVal =
1778           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1779                                AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1780                                AtomicLVal.getTBAAInfo());
1781     } else if (AtomicLVal.isVectorElt()) {
1782       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1783                                          AtomicLVal.getType(),
1784                                          AtomicLVal.getBaseInfo(),
1785                                          AtomicLVal.getTBAAInfo());
1786       DesiredLVal = LValue::MakeVectorElt(
1787           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1788           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1789     } else {
1790       assert(AtomicLVal.isExtVectorElt());
1791       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1792                                             AtomicLVal.getType(),
1793                                             AtomicLVal.getBaseInfo(),
1794                                             AtomicLVal.getTBAAInfo());
1795       DesiredLVal = LValue::MakeExtVectorElt(
1796           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1797           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1798     }
1799     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1800   }
1801   // Store new value in the corresponding memory area.
1802   RValue NewRVal = UpdateOp(UpRVal);
1803   if (NewRVal.isScalar()) {
1804     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1805   } else {
1806     assert(NewRVal.isComplex());
1807     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1808                            /*isInit=*/false);
1809   }
1810 }
1811 
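/// Perform the update as a libcall-based compare-and-swap loop: load the old
/// value with __atomic_load, compute the desired value in a temporary, and
/// retry __atomic_compare_exchange until it succeeds.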
1812 void AtomicInfo::EmitAtomicUpdateLibcall(
1813     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1814     bool IsVolatile) {
1815   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1816 
1817   Address ExpectedAddr = CreateTempAlloca();
1818 
1819   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1820   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1821   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1822   CGF.EmitBlock(ContBB);
1823   Address DesiredAddr = CreateTempAlloca();
1824   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1825       requiresMemSetZero(getAtomicAddress().getElementType())) {
1826     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1827     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1828   }
1829   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1830                                            AggValueSlot::ignored(),
1831                                            SourceLocation(), /*AsValue=*/false);
1832   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1833   auto *Res =
1834       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1835                                        DesiredAddr.getPointer(),
1836                                        AO, Failure);
1837   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1838   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1839 }
1840 
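/// Perform the update as an inline compare-and-swap loop: atomically load the
/// old value, compute the desired value in a temporary, and retry the cmpxchg
/// until it succeeds, feeding each failure result back through a PHI node.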
1841 void AtomicInfo::EmitAtomicUpdateOp(
1842     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1843     bool IsVolatile) {
1844   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1845 
1846   // Do the atomic load.
1847   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues, perform the compare-and-swap procedure.
1849   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1850   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1851   auto *CurBB = CGF.Builder.GetInsertBlock();
1852   CGF.EmitBlock(ContBB);
1853   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1854                                              /*NumReservedValues=*/2);
1855   PHI->addIncoming(OldVal, CurBB);
1856   Address NewAtomicAddr = CreateTempAlloca();
1857   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1858   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1859       requiresMemSetZero(getAtomicAddress().getElementType())) {
1860     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1861   }
1862   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1863                                            SourceLocation(), /*AsValue=*/false);
1864   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1865   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1866   // Try to write new value using cmpxchg operation.
1867   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1868   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1869   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1870   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1871 }
1872 
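/// Store a directly supplied new value \p UpdateRVal into \p DesiredAddr,
/// projecting through the non-simple (bit-field or vector-element) atomic
/// l-value.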
1873 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1874                                   RValue UpdateRVal, Address DesiredAddr) {
1875   LValue AtomicLVal = Atomics.getAtomicLValue();
1876   LValue DesiredLVal;
1877   // Build new lvalue for temp address.
1878   if (AtomicLVal.isBitField()) {
1879     DesiredLVal =
1880         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1881                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1882                              AtomicLVal.getTBAAInfo());
1883   } else if (AtomicLVal.isVectorElt()) {
1884     DesiredLVal =
1885         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1886                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1887                               AtomicLVal.getTBAAInfo());
1888   } else {
1889     assert(AtomicLVal.isExtVectorElt());
1890     DesiredLVal = LValue::MakeExtVectorElt(
1891         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1892         AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1893   }
1894   // Store new value in the corresponding memory area.
1895   assert(UpdateRVal.isScalar());
1896   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1897 }
1898 
1899 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1900                                          RValue UpdateRVal, bool IsVolatile) {
1901   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1902 
1903   Address ExpectedAddr = CreateTempAlloca();
1904 
1905   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1906   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1907   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1908   CGF.EmitBlock(ContBB);
1909   Address DesiredAddr = CreateTempAlloca();
1910   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1911       requiresMemSetZero(getAtomicAddress().getElementType())) {
1912     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1913     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1914   }
1915   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1916   auto *Res =
1917       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1918                                        DesiredAddr.getPointer(),
1919                                        AO, Failure);
1920   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1921   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1922 }
1923 
1924 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1925                                     bool IsVolatile) {
1926   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1927 
1928   // Do the atomic load.
1929   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues, perform the compare-and-swap procedure.
1931   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1932   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1933   auto *CurBB = CGF.Builder.GetInsertBlock();
1934   CGF.EmitBlock(ContBB);
1935   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1936                                              /*NumReservedValues=*/2);
1937   PHI->addIncoming(OldVal, CurBB);
1938   Address NewAtomicAddr = CreateTempAlloca();
1939   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1940   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1941       requiresMemSetZero(getAtomicAddress().getElementType())) {
1942     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1943   }
1944   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1945   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1946   // Try to write new value using cmpxchg operation.
1947   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1948   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1949   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1950   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1951 }
1952 
1953 void AtomicInfo::EmitAtomicUpdate(
1954     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1955     bool IsVolatile) {
1956   if (shouldUseLibcall()) {
1957     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1958   } else {
1959     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1960   }
1961 }
1962 
1963 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1964                                   bool IsVolatile) {
1965   if (shouldUseLibcall()) {
1966     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1967   } else {
1968     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1969   }
1970 }
1971 
1972 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1973                                       bool isInit) {
1974   bool IsVolatile = lvalue.isVolatileQualified();
1975   llvm::AtomicOrdering AO;
1976   if (lvalue.getType()->isAtomicType()) {
1977     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1978   } else {
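    // Non-atomic types are expected here only via the MS volatile path; give
    // the store release semantics.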
1979     AO = llvm::AtomicOrdering::Release;
1980     IsVolatile = true;
1981   }
1982   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1983 }
1984 
1985 /// Emit a store to an l-value of atomic type.
1986 ///
1987 /// Note that the r-value is expected to be an r-value *of the atomic
1988 /// type*; this means that for aggregate r-values, it should include
1989 /// storage for any padding that was necessary.
1990 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1991                                       llvm::AtomicOrdering AO, bool IsVolatile,
1992                                       bool isInit) {
1993   // If this is an aggregate r-value, it should agree in type except
1994   // maybe for address-space qualification.
1995   assert(!rvalue.isAggregate() ||
1996          rvalue.getAggregateAddress().getElementType() ==
1997              dest.getAddress(*this).getElementType());
1998 
1999   AtomicInfo atomics(*this, dest);
2000   LValue LVal = atomics.getAtomicLValue();
2001 
2002   // If this is an initialization, just put the value there normally.
2003   if (LVal.isSimple()) {
2004     if (isInit) {
2005       atomics.emitCopyIntoMemory(rvalue);
2006       return;
2007     }
2008 
2009     // Check whether we should use a library call.
2010     if (atomics.shouldUseLibcall()) {
2011       // Produce a source address.
2012       Address srcAddr = atomics.materializeRValue(rvalue);
2013 
2014       // void __atomic_store(size_t size, void *mem, void *val, int order)
2015       CallArgList args;
2016       args.add(RValue::get(atomics.getAtomicSizeValue()),
2017                getContext().getSizeType());
2018       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
2019                getContext().VoidPtrTy);
2020       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
2021                getContext().VoidPtrTy);
2022       args.add(
2023           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2024           getContext().IntTy);
2025       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2026       return;
2027     }
2028 
2029     // Okay, we're doing this natively.
2030     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
2031 
2032     // Do the atomic store.
2033     Address addr =
2034         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
2035     intValue = Builder.CreateIntCast(
2036         intValue, addr.getElementType(), /*isSigned=*/false);
2037     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
2038 
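    // A store cannot carry an acquire component; drop it from the requested
    // ordering.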
2039     if (AO == llvm::AtomicOrdering::Acquire)
2040       AO = llvm::AtomicOrdering::Monotonic;
2041     else if (AO == llvm::AtomicOrdering::AcquireRelease)
2042       AO = llvm::AtomicOrdering::Release;
2043     // Initializations don't need to be atomic.
2044     if (!isInit)
2045       store->setAtomic(AO);
2046 
2047     // Other decoration.
2048     if (IsVolatile)
2049       store->setVolatile(true);
2050     CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2051     return;
2052   }
2053 
2054   // Emit simple atomic update operation.
2055   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2056 }
2057 
/// Emit a compare-and-exchange op for an atomic type.
2060 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2061     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2062     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2063     AggValueSlot Slot) {
2064   // If this is an aggregate r-value, it should agree in type except
2065   // maybe for address-space qualification.
2066   assert(!Expected.isAggregate() ||
2067          Expected.getAggregateAddress().getElementType() ==
2068              Obj.getAddress(*this).getElementType());
2069   assert(!Desired.isAggregate() ||
2070          Desired.getAggregateAddress().getElementType() ==
2071              Obj.getAddress(*this).getElementType());
2072   AtomicInfo Atomics(*this, Obj);
2073 
2074   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2075                                            IsWeak);
2076 }
2077 
2078 void CodeGenFunction::EmitAtomicUpdate(
2079     LValue LVal, llvm::AtomicOrdering AO,
2080     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2081   AtomicInfo Atomics(*this, LVal);
2082   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2083 }
2084 
2085 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2086   AtomicInfo atomics(*this, dest);
2087 
2088   switch (atomics.getEvaluationKind()) {
2089   case TEK_Scalar: {
2090     llvm::Value *value = EmitScalarExpr(init);
2091     atomics.emitCopyIntoMemory(RValue::get(value));
2092     return;
2093   }
2094 
2095   case TEK_Complex: {
2096     ComplexPairTy value = EmitComplexExpr(init);
2097     atomics.emitCopyIntoMemory(RValue::getComplex(value));
2098     return;
2099   }
2100 
2101   case TEK_Aggregate: {
2102     // Fix up the destination if the initializer isn't an expression
2103     // of atomic type.
2104     bool Zeroed = false;
2105     if (!init->getType()->isAtomicType()) {
2106       Zeroed = atomics.emitMemSetZeroIfNecessary();
2107       dest = atomics.projectValue();
2108     }
2109 
2110     // Evaluate the expression directly into the destination.
2111     AggValueSlot slot = AggValueSlot::forLValue(
2112         dest, *this, AggValueSlot::IsNotDestructed,
2113         AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2114         AggValueSlot::DoesNotOverlap,
2115         Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2116 
2117     EmitAggExpr(init, slot);
2118     return;
2119   }
2120   }
2121   llvm_unreachable("bad evaluation kind");
2122 }
2123