1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the code for emitting atomic operations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCall.h"
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/FrontendDiagnostic.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
30   class AtomicInfo {
31     CodeGenFunction &CGF;
32     QualType AtomicTy;
33     QualType ValueTy;
34     uint64_t AtomicSizeInBits;
35     uint64_t ValueSizeInBits;
36     CharUnits AtomicAlign;
37     CharUnits ValueAlign;
38     TypeEvaluationKind EvaluationKind;
39     bool UseLibcall;
40     LValue LVal;
41     CGBitFieldInfo BFI;
42   public:
43     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45           EvaluationKind(TEK_Scalar), UseLibcall(true) {
46       assert(!lvalue.isGlobalReg());
47       ASTContext &C = CGF.getContext();
48       if (lvalue.isSimple()) {
49         AtomicTy = lvalue.getType();
50         if (auto *ATy = AtomicTy->getAs<AtomicType>())
51           ValueTy = ATy->getValueType();
52         else
53           ValueTy = AtomicTy;
54         EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56         uint64_t ValueAlignInBits;
57         uint64_t AtomicAlignInBits;
58         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59         ValueSizeInBits = ValueTI.Width;
60         ValueAlignInBits = ValueTI.Align;
61 
62         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63         AtomicSizeInBits = AtomicTI.Width;
64         AtomicAlignInBits = AtomicTI.Align;
65 
66         assert(ValueSizeInBits <= AtomicSizeInBits);
67         assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71         if (lvalue.getAlignment().isZero())
72           lvalue.setAlignment(AtomicAlign);
73 
74         LVal = lvalue;
75       } else if (lvalue.isBitField()) {
76         ValueTy = lvalue.getType();
77         ValueSizeInBits = C.getTypeSize(ValueTy);
78         auto &OrigBFI = lvalue.getBitFieldInfo();
79         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80         AtomicSizeInBits = C.toBits(
81             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82                 .alignTo(lvalue.getAlignment()));
83         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84         auto OffsetInChars =
85             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86             lvalue.getAlignment();
87         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88             VoidPtrAddr, OffsetInChars.getQuantity());
89         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90             VoidPtrAddr,
91             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92             "atomic_bitfield_base");
93         BFI = OrigBFI;
94         BFI.Offset = Offset;
95         BFI.StorageSize = AtomicSizeInBits;
96         BFI.StorageOffset += OffsetInChars;
97         LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98                                     BFI, lvalue.getType(), lvalue.getBaseInfo(),
99                                     lvalue.getTBAAInfo());
100         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101         if (AtomicTy.isNull()) {
102           llvm::APInt Size(
103               /*numBits=*/32,
104               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105           AtomicTy =
106               C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
107                                      /*IndexTypeQuals=*/0);
108         }
109         AtomicAlign = ValueAlign = lvalue.getAlignment();
110       } else if (lvalue.isVectorElt()) {
111         ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112         ValueSizeInBits = C.getTypeSize(ValueTy);
113         AtomicTy = lvalue.getType();
114         AtomicSizeInBits = C.getTypeSize(AtomicTy);
115         AtomicAlign = ValueAlign = lvalue.getAlignment();
116         LVal = lvalue;
117       } else {
118         assert(lvalue.isExtVectorElt());
119         ValueTy = lvalue.getType();
120         ValueSizeInBits = C.getTypeSize(ValueTy);
121         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122             lvalue.getType(), cast<llvm::VectorType>(
123                                   lvalue.getExtVectorAddress().getElementType())
124                                   ->getNumElements());
125         AtomicSizeInBits = C.getTypeSize(AtomicTy);
126         AtomicAlign = ValueAlign = lvalue.getAlignment();
127         LVal = lvalue;
128       }
129       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131     }
132 
133     QualType getAtomicType() const { return AtomicTy; }
134     QualType getValueType() const { return ValueTy; }
135     CharUnits getAtomicAlignment() const { return AtomicAlign; }
136     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139     bool shouldUseLibcall() const { return UseLibcall; }
140     const LValue &getAtomicLValue() const { return LVal; }
141     llvm::Value *getAtomicPointer() const {
142       if (LVal.isSimple())
143         return LVal.getPointer(CGF);
144       else if (LVal.isBitField())
145         return LVal.getBitFieldPointer();
146       else if (LVal.isVectorElt())
147         return LVal.getVectorPointer();
148       assert(LVal.isExtVectorElt());
149       return LVal.getExtVectorPointer();
150     }
151     Address getAtomicAddress() const {
152       return Address(getAtomicPointer(), getAtomicAlignment());
153     }
154 
155     Address getAtomicAddressAsAtomicIntPointer() const {
156       return emitCastToAtomicIntPointer(getAtomicAddress());
157     }
158 
159     /// Is the atomic size larger than the underlying value type?
160     ///
161     /// Note that the absence of padding does not mean that atomic
162     /// objects are completely interchangeable with non-atomic
163     /// objects: we might have promoted the alignment of a type
164     /// without making it bigger.
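    ///
    /// For example, an _Atomic struct of three chars is typically padded up
    /// to four bytes so that a 32-bit atomic instruction can be used; in that
    /// case hasPadding() returns true.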
165     bool hasPadding() const {
166       return (ValueSizeInBits != AtomicSizeInBits);
167     }
168 
169     bool emitMemSetZeroIfNecessary() const;
170 
171     llvm::Value *getAtomicSizeValue() const {
172       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
173       return CGF.CGM.getSize(size);
174     }
175 
    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations.
178     Address emitCastToAtomicIntPointer(Address Addr) const;
179 
180     /// If Addr is compatible with the iN that will be used for an atomic
181     /// operation, bitcast it. Otherwise, create a temporary that is suitable
182     /// and copy the value across.
183     Address convertToAtomicIntPointer(Address Addr) const;
184 
185     /// Turn an atomic-layout object into an r-value.
186     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
187                                      SourceLocation loc, bool AsValue) const;
188 
    /// Converts an r-value to an integer value.
190     llvm::Value *convertRValueToInt(RValue RVal) const;
191 
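    /// Converts an integer representation of the atomic value back to an
    /// r-value of the value type or of the whole atomic type.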
192     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
193                                      AggValueSlot ResultSlot,
194                                      SourceLocation Loc, bool AsValue) const;
195 
196     /// Copy an atomic r-value into atomic-layout memory.
197     void emitCopyIntoMemory(RValue rvalue) const;
198 
199     /// Project an l-value down to the value field.
200     LValue projectValue() const {
201       assert(LVal.isSimple());
202       Address addr = getAtomicAddress();
203       if (hasPadding())
204         addr = CGF.Builder.CreateStructGEP(addr, 0);
205 
206       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
207                               LVal.getBaseInfo(), LVal.getTBAAInfo());
208     }
209 
210     /// Emits atomic load.
211     /// \returns Loaded value.
212     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
213                           bool AsValue, llvm::AtomicOrdering AO,
214                           bool IsVolatile);
215 
216     /// Emits atomic compare-and-exchange sequence.
217     /// \param Expected Expected value.
218     /// \param Desired Desired value.
219     /// \param Success Atomic ordering for success operation.
220     /// \param Failure Atomic ordering for failed operation.
221     /// \param IsWeak true if atomic operation is weak, false otherwise.
222     /// \returns Pair of values: previous value from storage (value type) and
223     /// boolean flag (i1 type) with true if success and false otherwise.
224     std::pair<RValue, llvm::Value *>
225     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
226                               llvm::AtomicOrdering Success =
227                                   llvm::AtomicOrdering::SequentiallyConsistent,
228                               llvm::AtomicOrdering Failure =
229                                   llvm::AtomicOrdering::SequentiallyConsistent,
230                               bool IsWeak = false);
231 
232     /// Emits atomic update.
233     /// \param AO Atomic ordering.
234     /// \param UpdateOp Update operation for the current lvalue.
235     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
236                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
237                           bool IsVolatile);
238     /// Emits atomic update.
239     /// \param AO Atomic ordering.
240     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
241                           bool IsVolatile);
242 
243     /// Materialize an atomic r-value in atomic-layout memory.
244     Address materializeRValue(RValue rvalue) const;
245 
246     /// Creates temp alloca for intermediate operations on atomic value.
247     Address CreateTempAlloca() const;
248   private:
249     bool requiresMemSetZero(llvm::Type *type) const;
250 
251 
252     /// Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
254                                llvm::AtomicOrdering AO, bool IsVolatile);
255     /// Emits atomic load as LLVM instruction.
256     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
257     /// Emits atomic compare-and-exchange op as a libcall.
258     llvm::Value *EmitAtomicCompareExchangeLibcall(
259         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
260         llvm::AtomicOrdering Success =
261             llvm::AtomicOrdering::SequentiallyConsistent,
262         llvm::AtomicOrdering Failure =
263             llvm::AtomicOrdering::SequentiallyConsistent);
264     /// Emits atomic compare-and-exchange op as LLVM instruction.
265     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
266         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
267         llvm::AtomicOrdering Success =
268             llvm::AtomicOrdering::SequentiallyConsistent,
269         llvm::AtomicOrdering Failure =
270             llvm::AtomicOrdering::SequentiallyConsistent,
271         bool IsWeak = false);
272     /// Emit atomic update as libcalls.
273     void
274     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
275                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
276                             bool IsVolatile);
277     /// Emit atomic update as LLVM instructions.
278     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
279                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
280                             bool IsVolatile);
281     /// Emit atomic update as libcalls.
282     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
283                                  bool IsVolatile);
284     /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
286                             bool IsVolatile);
287   };
288 }
289 
290 Address AtomicInfo::CreateTempAlloca() const {
291   Address TempAlloca = CGF.CreateMemTemp(
292       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
293                                                                 : AtomicTy,
294       getAtomicAlignment(),
295       "atomic-temp");
296   // Cast to pointer to value type for bitfields.
297   if (LVal.isBitField())
298     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
299         TempAlloca, getAtomicAddress().getType());
300   return TempAlloca;
301 }
302 
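/// Emit a call to a runtime library function such as __atomic_load or
/// __atomic_compare_exchange, arranging arguments and the return value
/// according to the builtin function call convention for \p resultType.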
303 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
304                                 StringRef fnName,
305                                 QualType resultType,
306                                 CallArgList &args) {
307   const CGFunctionInfo &fnInfo =
308     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
309   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
310   llvm::FunctionCallee fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
311   auto callee = CGCallee::forDirect(fn);
312   return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
313 }
314 
315 /// Does a store of the given IR type modify the full expected width?
316 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
317                            uint64_t expectedSize) {
318   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
319 }
320 
321 /// Does the atomic type require memsetting to zero before initialization?
322 ///
323 /// The IR type is provided as a way of making certain queries faster.
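///
/// For instance, on x86-64 an _Atomic(long double) occupies 16 bytes, but a
/// store of the underlying x86_fp80 value only writes 10 of them, so the
/// remaining bytes must be zeroed to give cmpxchg-based code a deterministic
/// bit pattern to compare against.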
324 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
325   // If the atomic type has size padding, we definitely need a memset.
326   if (hasPadding()) return true;
327 
328   // Otherwise, do some simple heuristics to try to avoid it:
329   switch (getEvaluationKind()) {
330   // For scalars and complexes, check whether the store size of the
331   // type uses the full size.
332   case TEK_Scalar:
333     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
334   case TEK_Complex:
335     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
336                            AtomicSizeInBits / 2);
337 
338   // Padding in structs has an undefined bit pattern.  User beware.
339   case TEK_Aggregate:
340     return false;
341   }
342   llvm_unreachable("bad evaluation kind");
343 }
344 
345 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
346   assert(LVal.isSimple());
347   llvm::Value *addr = LVal.getPointer(CGF);
348   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
349     return false;
350 
351   CGF.Builder.CreateMemSet(
352       addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
353       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
354       LVal.getAlignment().getAsAlign());
355   return true;
356 }
357 
358 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
359                               Address Dest, Address Ptr,
360                               Address Val1, Address Val2,
361                               uint64_t Size,
362                               llvm::AtomicOrdering SuccessOrder,
363                               llvm::AtomicOrdering FailureOrder,
364                               llvm::SyncScope::ID Scope) {
  // Load the expected and desired values; the weak flag is applied to the
  // cmpxchg instruction below.
366   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
367   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
368 
369   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
370       Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
371       Scope);
372   Pair->setVolatile(E->isVolatile());
373   Pair->setWeak(IsWeak);
374 
375   // Cmp holds the result of the compare-exchange operation: true on success,
376   // false on failure.
377   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
378   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
379 
380   // This basic block is used to hold the store instruction if the operation
381   // failed.
382   llvm::BasicBlock *StoreExpectedBB =
383       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
384 
385   // This basic block is the exit point of the operation, we should end up
386   // here regardless of whether or not the operation succeeded.
387   llvm::BasicBlock *ContinueBB =
388       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
389 
390   // Update Expected if Expected isn't equal to Old, otherwise branch to the
391   // exit point.
392   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
393 
394   CGF.Builder.SetInsertPoint(StoreExpectedBB);
395   // Update the memory at Expected with Old's value.
396   CGF.Builder.CreateStore(Old, Val1);
397   // Finally, branch to the exit point.
398   CGF.Builder.CreateBr(ContinueBB);
399 
400   CGF.Builder.SetInsertPoint(ContinueBB);
401   // Update the memory at Dest with Cmp's value.
402   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
403 }
404 
405 /// Given an ordering required on success, emit all possible cmpxchg
406 /// instructions to cope with the provided (but possibly only dynamically known)
407 /// FailureOrder.
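///
/// For example, when the failure order is only known at run time, this emits
/// a switch over the C ABI ordering value that dispatches to monotonic,
/// acquire, and (when permitted by the success order) seq_cst variants of the
/// same cmpxchg.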
408 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
409                                         bool IsWeak, Address Dest, Address Ptr,
410                                         Address Val1, Address Val2,
411                                         llvm::Value *FailureOrderVal,
412                                         uint64_t Size,
413                                         llvm::AtomicOrdering SuccessOrder,
414                                         llvm::SyncScope::ID Scope) {
415   llvm::AtomicOrdering FailureOrder;
416   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
417     auto FOS = FO->getSExtValue();
418     if (!llvm::isValidAtomicOrderingCABI(FOS))
419       FailureOrder = llvm::AtomicOrdering::Monotonic;
420     else
421       switch ((llvm::AtomicOrderingCABI)FOS) {
422       case llvm::AtomicOrderingCABI::relaxed:
423       case llvm::AtomicOrderingCABI::release:
424       case llvm::AtomicOrderingCABI::acq_rel:
425         FailureOrder = llvm::AtomicOrdering::Monotonic;
426         break;
427       case llvm::AtomicOrderingCABI::consume:
428       case llvm::AtomicOrderingCABI::acquire:
429         FailureOrder = llvm::AtomicOrdering::Acquire;
430         break;
431       case llvm::AtomicOrderingCABI::seq_cst:
432         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
433         break;
434       }
435     if (isStrongerThan(FailureOrder, SuccessOrder)) {
436       // Don't assert on undefined behavior "failure argument shall be no
437       // stronger than the success argument".
438       FailureOrder =
439           llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
440     }
441     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
442                       FailureOrder, Scope);
443     return;
444   }
445 
446   // Create all the relevant BB's
447   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
448                    *SeqCstBB = nullptr;
449   MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
450   if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
451       SuccessOrder != llvm::AtomicOrdering::Release)
452     AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
453   if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
454     SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
455 
456   llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
457 
458   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
459 
460   // Emit all the different atomics
461 
462   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
463   // doesn't matter unless someone is crazy enough to use something that
464   // doesn't fold to a constant for the ordering.
465   CGF.Builder.SetInsertPoint(MonotonicBB);
466   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
467                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
468   CGF.Builder.CreateBr(ContBB);
469 
470   if (AcquireBB) {
471     CGF.Builder.SetInsertPoint(AcquireBB);
472     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
473                       Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
474     CGF.Builder.CreateBr(ContBB);
475     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
476                 AcquireBB);
477     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
478                 AcquireBB);
479   }
480   if (SeqCstBB) {
481     CGF.Builder.SetInsertPoint(SeqCstBB);
482     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
483                       llvm::AtomicOrdering::SequentiallyConsistent, Scope);
484     CGF.Builder.CreateBr(ContBB);
485     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
486                 SeqCstBB);
487   }
488 
489   CGF.Builder.SetInsertPoint(ContBB);
490 }
491 
492 /// Duplicate the atomic min/max operation in conventional IR for the builtin
493 /// variants that return the new rather than the original value.
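///
/// For example, for a signed __atomic_max_fetch the atomicrmw yields the old
/// value, and the stored result is recomputed here as
///   new = (old s> rhs) ? old : rhs.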
494 static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
495                                          AtomicExpr::AtomicOp Op,
496                                          bool IsSigned,
497                                          llvm::Value *OldVal,
498                                          llvm::Value *RHS) {
499   llvm::CmpInst::Predicate Pred;
500   switch (Op) {
501   default:
502     llvm_unreachable("Unexpected min/max operation");
503   case AtomicExpr::AO__atomic_max_fetch:
504     Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
505     break;
506   case AtomicExpr::AO__atomic_min_fetch:
507     Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
508     break;
509   }
510   llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
511   return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
512 }
513 
514 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
515                          Address Ptr, Address Val1, Address Val2,
516                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
517                          uint64_t Size, llvm::AtomicOrdering Order,
518                          llvm::SyncScope::ID Scope) {
519   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
520   bool PostOpMinMax = false;
521   unsigned PostOp = 0;
522 
523   switch (E->getOp()) {
524   case AtomicExpr::AO__c11_atomic_init:
525   case AtomicExpr::AO__opencl_atomic_init:
526     llvm_unreachable("Already handled!");
527 
528   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
529   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
530     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
531                                 FailureOrder, Size, Order, Scope);
532     return;
533   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
534   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
535     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
536                                 FailureOrder, Size, Order, Scope);
537     return;
538   case AtomicExpr::AO__atomic_compare_exchange:
539   case AtomicExpr::AO__atomic_compare_exchange_n: {
540     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
541       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
542                                   Val1, Val2, FailureOrder, Size, Order, Scope);
543     } else {
544       // Create all the relevant BB's
545       llvm::BasicBlock *StrongBB =
546           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
548       llvm::BasicBlock *ContBB =
549           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
550 
551       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
552       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
553 
554       CGF.Builder.SetInsertPoint(StrongBB);
555       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
556                                   FailureOrder, Size, Order, Scope);
557       CGF.Builder.CreateBr(ContBB);
558 
559       CGF.Builder.SetInsertPoint(WeakBB);
560       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
561                                   FailureOrder, Size, Order, Scope);
562       CGF.Builder.CreateBr(ContBB);
563 
564       CGF.Builder.SetInsertPoint(ContBB);
565     }
566     return;
567   }
568   case AtomicExpr::AO__c11_atomic_load:
569   case AtomicExpr::AO__opencl_atomic_load:
570   case AtomicExpr::AO__atomic_load_n:
571   case AtomicExpr::AO__atomic_load: {
572     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
573     Load->setAtomic(Order, Scope);
574     Load->setVolatile(E->isVolatile());
575     CGF.Builder.CreateStore(Load, Dest);
576     return;
577   }
578 
579   case AtomicExpr::AO__c11_atomic_store:
580   case AtomicExpr::AO__opencl_atomic_store:
581   case AtomicExpr::AO__atomic_store:
582   case AtomicExpr::AO__atomic_store_n: {
583     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
584     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
585     Store->setAtomic(Order, Scope);
586     Store->setVolatile(E->isVolatile());
587     return;
588   }
589 
590   case AtomicExpr::AO__c11_atomic_exchange:
591   case AtomicExpr::AO__opencl_atomic_exchange:
592   case AtomicExpr::AO__atomic_exchange_n:
593   case AtomicExpr::AO__atomic_exchange:
594     Op = llvm::AtomicRMWInst::Xchg;
595     break;
596 
597   case AtomicExpr::AO__atomic_add_fetch:
598     PostOp = llvm::Instruction::Add;
599     LLVM_FALLTHROUGH;
600   case AtomicExpr::AO__c11_atomic_fetch_add:
601   case AtomicExpr::AO__opencl_atomic_fetch_add:
602   case AtomicExpr::AO__atomic_fetch_add:
603     Op = llvm::AtomicRMWInst::Add;
604     break;
605 
606   case AtomicExpr::AO__atomic_sub_fetch:
607     PostOp = llvm::Instruction::Sub;
608     LLVM_FALLTHROUGH;
609   case AtomicExpr::AO__c11_atomic_fetch_sub:
610   case AtomicExpr::AO__opencl_atomic_fetch_sub:
611   case AtomicExpr::AO__atomic_fetch_sub:
612     Op = llvm::AtomicRMWInst::Sub;
613     break;
614 
615   case AtomicExpr::AO__atomic_min_fetch:
616     PostOpMinMax = true;
617     LLVM_FALLTHROUGH;
618   case AtomicExpr::AO__c11_atomic_fetch_min:
619   case AtomicExpr::AO__opencl_atomic_fetch_min:
620   case AtomicExpr::AO__atomic_fetch_min:
621     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
622                                                   : llvm::AtomicRMWInst::UMin;
623     break;
624 
625   case AtomicExpr::AO__atomic_max_fetch:
626     PostOpMinMax = true;
627     LLVM_FALLTHROUGH;
628   case AtomicExpr::AO__c11_atomic_fetch_max:
629   case AtomicExpr::AO__opencl_atomic_fetch_max:
630   case AtomicExpr::AO__atomic_fetch_max:
631     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
632                                                   : llvm::AtomicRMWInst::UMax;
633     break;
634 
635   case AtomicExpr::AO__atomic_and_fetch:
636     PostOp = llvm::Instruction::And;
637     LLVM_FALLTHROUGH;
638   case AtomicExpr::AO__c11_atomic_fetch_and:
639   case AtomicExpr::AO__opencl_atomic_fetch_and:
640   case AtomicExpr::AO__atomic_fetch_and:
641     Op = llvm::AtomicRMWInst::And;
642     break;
643 
644   case AtomicExpr::AO__atomic_or_fetch:
645     PostOp = llvm::Instruction::Or;
646     LLVM_FALLTHROUGH;
647   case AtomicExpr::AO__c11_atomic_fetch_or:
648   case AtomicExpr::AO__opencl_atomic_fetch_or:
649   case AtomicExpr::AO__atomic_fetch_or:
650     Op = llvm::AtomicRMWInst::Or;
651     break;
652 
653   case AtomicExpr::AO__atomic_xor_fetch:
654     PostOp = llvm::Instruction::Xor;
655     LLVM_FALLTHROUGH;
656   case AtomicExpr::AO__c11_atomic_fetch_xor:
657   case AtomicExpr::AO__opencl_atomic_fetch_xor:
658   case AtomicExpr::AO__atomic_fetch_xor:
659     Op = llvm::AtomicRMWInst::Xor;
660     break;
661 
662   case AtomicExpr::AO__atomic_nand_fetch:
663     PostOp = llvm::Instruction::And; // the NOT is special cased below
664     LLVM_FALLTHROUGH;
665   case AtomicExpr::AO__atomic_fetch_nand:
666     Op = llvm::AtomicRMWInst::Nand;
667     break;
668   }
669 
670   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
671   llvm::AtomicRMWInst *RMWI =
672       CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
673   RMWI->setVolatile(E->isVolatile());
674 
675   // For __atomic_*_fetch operations, perform the operation again to
676   // determine the value which was written.
677   llvm::Value *Result = RMWI;
678   if (PostOpMinMax)
679     Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
680                                   E->getValueType()->isSignedIntegerType(),
681                                   RMWI, LoadVal1);
682   else if (PostOp)
683     Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
684                                      LoadVal1);
685   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
686     Result = CGF.Builder.CreateNot(Result);
687   CGF.Builder.CreateStore(Result, Dest);
688 }
689 
690 // This function emits any expression (scalar, complex, or aggregate)
691 // into a temporary alloca.
692 static Address
693 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
694   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
695   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
696                        /*Init*/ true);
697   return DeclPtr;
698 }
699 
700 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
701                          Address Ptr, Address Val1, Address Val2,
702                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
703                          uint64_t Size, llvm::AtomicOrdering Order,
704                          llvm::Value *Scope) {
705   auto ScopeModel = Expr->getScopeModel();
706 
  // LLVM atomic instructions always have a synchronization scope. If the
  // Clang atomic expression has no scope operand, use the default LLVM scope.
709   if (!ScopeModel) {
710     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
711                  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
712     return;
713   }
714 
715   // Handle constant scope.
716   if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
717     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
718         CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
719         Order, CGF.CGM.getLLVMContext());
720     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
721                  Order, SCID);
722     return;
723   }
724 
725   // Handle non-constant scope.
726   auto &Builder = CGF.Builder;
727   auto Scopes = ScopeModel->getRuntimeValues();
728   llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
729   for (auto S : Scopes)
730     BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
731 
732   llvm::BasicBlock *ContBB =
733       CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
734 
735   auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported synchronization scope is encountered at run time, fall
  // back to the scope model's fallback value.
738   auto FallBack = ScopeModel->getFallBackValue();
739   llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
740   for (auto S : Scopes) {
741     auto *B = BB[S];
742     if (S != FallBack)
743       SI->addCase(Builder.getInt32(S), B);
744 
745     Builder.SetInsertPoint(B);
746     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
747                  Order,
748                  CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
749                                                          ScopeModel->map(S),
750                                                          Order,
751                                                          CGF.getLLVMContext()));
752     Builder.CreateBr(ContBB);
753   }
754 
755   Builder.SetInsertPoint(ContBB);
756 }
757 
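/// Add the value operand of an atomic libcall to \p Args: the optimized,
/// size-suffixed libcalls take the value directly as an appropriately sized
/// integer, while the generic libcalls take it indirectly as a void pointer.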
758 static void
759 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
760                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
761                   SourceLocation Loc, CharUnits SizeInChars) {
762   if (UseOptimizedLibcall) {
763     // Load value and pass it to the function directly.
764     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
765     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
766     ValTy =
767         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
768     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
769                                                 SizeInBits)->getPointerTo();
770     Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
771     Val = CGF.EmitLoadOfScalar(Ptr, false,
772                                CGF.getContext().getPointerType(ValTy),
773                                Loc);
774     // Coerce the value into an appropriately sized integer type.
775     Args.add(RValue::get(Val), ValTy);
776   } else {
777     // Non-optimized functions always take a reference.
778     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
779                          CGF.getContext().VoidPtrTy);
780   }
781 }
782 
783 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
784   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
785   QualType MemTy = AtomicTy;
786   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
787     MemTy = AT->getValueType();
788   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
789 
790   Address Val1 = Address::invalid();
791   Address Val2 = Address::invalid();
792   Address Dest = Address::invalid();
793   Address Ptr = EmitPointerWithAlignment(E->getPtr());
794 
795   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
796       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
797     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
798     EmitAtomicInit(E->getVal1(), lvalue);
799     return RValue::get(nullptr);
800   }
801 
802   CharUnits sizeChars, alignChars;
803   std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
804   uint64_t Size = sizeChars.getQuantity();
805   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
806 
807   bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
808   bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
809   bool UseLibcall = Misaligned | Oversized;
810   CharUnits MaxInlineWidth =
811       getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
812 
813   DiagnosticsEngine &Diags = CGM.getDiags();
814 
815   if (Misaligned) {
816     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
817         << (int)sizeChars.getQuantity()
818         << (int)Ptr.getAlignment().getQuantity();
819   }
820 
821   if (Oversized) {
822     Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
823         << (int)sizeChars.getQuantity() << (int)MaxInlineWidth.getQuantity();
824   }
825 
826   llvm::Value *Order = EmitScalarExpr(E->getOrder());
827   llvm::Value *Scope =
828       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
829 
830   switch (E->getOp()) {
831   case AtomicExpr::AO__c11_atomic_init:
832   case AtomicExpr::AO__opencl_atomic_init:
833     llvm_unreachable("Already handled above with EmitAtomicInit!");
834 
835   case AtomicExpr::AO__c11_atomic_load:
836   case AtomicExpr::AO__opencl_atomic_load:
837   case AtomicExpr::AO__atomic_load_n:
838     break;
839 
840   case AtomicExpr::AO__atomic_load:
841     Dest = EmitPointerWithAlignment(E->getVal1());
842     break;
843 
844   case AtomicExpr::AO__atomic_store:
845     Val1 = EmitPointerWithAlignment(E->getVal1());
846     break;
847 
848   case AtomicExpr::AO__atomic_exchange:
849     Val1 = EmitPointerWithAlignment(E->getVal1());
850     Dest = EmitPointerWithAlignment(E->getVal2());
851     break;
852 
853   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
854   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
855   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
856   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
857   case AtomicExpr::AO__atomic_compare_exchange_n:
858   case AtomicExpr::AO__atomic_compare_exchange:
859     Val1 = EmitPointerWithAlignment(E->getVal1());
860     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
861       Val2 = EmitPointerWithAlignment(E->getVal2());
862     else
863       Val2 = EmitValToTemp(*this, E->getVal2());
864     OrderFail = EmitScalarExpr(E->getOrderFail());
865     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
866         E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
867       IsWeak = EmitScalarExpr(E->getWeak());
868     break;
869 
870   case AtomicExpr::AO__c11_atomic_fetch_add:
871   case AtomicExpr::AO__c11_atomic_fetch_sub:
872   case AtomicExpr::AO__opencl_atomic_fetch_add:
873   case AtomicExpr::AO__opencl_atomic_fetch_sub:
874     if (MemTy->isPointerType()) {
875       // For pointer arithmetic, we're required to do a bit of math:
876       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
877       // ... but only for the C11 builtins. The GNU builtins expect the
878       // user to multiply by sizeof(T).
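      // For example (illustrative): with _Atomic(int *) p,
      // __c11_atomic_fetch_add(&p, 2, ...) advances p by 2 * sizeof(int),
      // whereas the GNU __atomic_fetch_add adds the raw value that was
      // passed.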
879       QualType Val1Ty = E->getVal1()->getType();
880       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
881       CharUnits PointeeIncAmt =
882           getContext().getTypeSizeInChars(MemTy->getPointeeType());
883       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
884       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
885       Val1 = Temp;
886       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
887       break;
888     }
889       LLVM_FALLTHROUGH;
890   case AtomicExpr::AO__atomic_fetch_add:
891   case AtomicExpr::AO__atomic_fetch_sub:
892   case AtomicExpr::AO__atomic_add_fetch:
893   case AtomicExpr::AO__atomic_sub_fetch:
894   case AtomicExpr::AO__c11_atomic_store:
895   case AtomicExpr::AO__c11_atomic_exchange:
896   case AtomicExpr::AO__opencl_atomic_store:
897   case AtomicExpr::AO__opencl_atomic_exchange:
898   case AtomicExpr::AO__atomic_store_n:
899   case AtomicExpr::AO__atomic_exchange_n:
900   case AtomicExpr::AO__c11_atomic_fetch_and:
901   case AtomicExpr::AO__c11_atomic_fetch_or:
902   case AtomicExpr::AO__c11_atomic_fetch_xor:
903   case AtomicExpr::AO__c11_atomic_fetch_max:
904   case AtomicExpr::AO__c11_atomic_fetch_min:
905   case AtomicExpr::AO__opencl_atomic_fetch_and:
906   case AtomicExpr::AO__opencl_atomic_fetch_or:
907   case AtomicExpr::AO__opencl_atomic_fetch_xor:
908   case AtomicExpr::AO__opencl_atomic_fetch_min:
909   case AtomicExpr::AO__opencl_atomic_fetch_max:
910   case AtomicExpr::AO__atomic_fetch_and:
911   case AtomicExpr::AO__atomic_fetch_or:
912   case AtomicExpr::AO__atomic_fetch_xor:
913   case AtomicExpr::AO__atomic_fetch_nand:
914   case AtomicExpr::AO__atomic_and_fetch:
915   case AtomicExpr::AO__atomic_or_fetch:
916   case AtomicExpr::AO__atomic_xor_fetch:
917   case AtomicExpr::AO__atomic_nand_fetch:
918   case AtomicExpr::AO__atomic_max_fetch:
919   case AtomicExpr::AO__atomic_min_fetch:
920   case AtomicExpr::AO__atomic_fetch_max:
921   case AtomicExpr::AO__atomic_fetch_min:
922     Val1 = EmitValToTemp(*this, E->getVal1());
923     break;
924   }
925 
926   QualType RValTy = E->getType().getUnqualifiedType();
927 
928   // The inlined atomics only function on iN types, where N is a power of 2. We
929   // need to make sure (via temporaries if necessary) that all incoming values
930   // are compatible.
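  // For example (illustrative), a 4-byte float operand is accessed through an
  // i32-typed pointer here so the atomic instructions can operate on it.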
931   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
932   AtomicInfo Atomics(*this, AtomicVal);
933 
934   Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
935   if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
936   if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
937   if (Dest.isValid())
938     Dest = Atomics.emitCastToAtomicIntPointer(Dest);
939   else if (E->isCmpXChg())
940     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
941   else if (!RValTy->isVoidType())
942     Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
943 
944   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
945   if (UseLibcall) {
946     bool UseOptimizedLibcall = false;
947     switch (E->getOp()) {
948     case AtomicExpr::AO__c11_atomic_init:
949     case AtomicExpr::AO__opencl_atomic_init:
950       llvm_unreachable("Already handled above with EmitAtomicInit!");
951 
952     case AtomicExpr::AO__c11_atomic_fetch_add:
953     case AtomicExpr::AO__opencl_atomic_fetch_add:
954     case AtomicExpr::AO__atomic_fetch_add:
955     case AtomicExpr::AO__c11_atomic_fetch_and:
956     case AtomicExpr::AO__opencl_atomic_fetch_and:
957     case AtomicExpr::AO__atomic_fetch_and:
958     case AtomicExpr::AO__c11_atomic_fetch_or:
959     case AtomicExpr::AO__opencl_atomic_fetch_or:
960     case AtomicExpr::AO__atomic_fetch_or:
961     case AtomicExpr::AO__atomic_fetch_nand:
962     case AtomicExpr::AO__c11_atomic_fetch_sub:
963     case AtomicExpr::AO__opencl_atomic_fetch_sub:
964     case AtomicExpr::AO__atomic_fetch_sub:
965     case AtomicExpr::AO__c11_atomic_fetch_xor:
966     case AtomicExpr::AO__opencl_atomic_fetch_xor:
967     case AtomicExpr::AO__opencl_atomic_fetch_min:
968     case AtomicExpr::AO__opencl_atomic_fetch_max:
969     case AtomicExpr::AO__atomic_fetch_xor:
970     case AtomicExpr::AO__c11_atomic_fetch_max:
971     case AtomicExpr::AO__c11_atomic_fetch_min:
972     case AtomicExpr::AO__atomic_add_fetch:
973     case AtomicExpr::AO__atomic_and_fetch:
974     case AtomicExpr::AO__atomic_nand_fetch:
975     case AtomicExpr::AO__atomic_or_fetch:
976     case AtomicExpr::AO__atomic_sub_fetch:
977     case AtomicExpr::AO__atomic_xor_fetch:
978     case AtomicExpr::AO__atomic_fetch_max:
979     case AtomicExpr::AO__atomic_fetch_min:
980     case AtomicExpr::AO__atomic_max_fetch:
981     case AtomicExpr::AO__atomic_min_fetch:
982       // For these, only library calls for certain sizes exist.
983       UseOptimizedLibcall = true;
984       break;
985 
986     case AtomicExpr::AO__atomic_load:
987     case AtomicExpr::AO__atomic_store:
988     case AtomicExpr::AO__atomic_exchange:
989     case AtomicExpr::AO__atomic_compare_exchange:
990       // Use the generic version if we don't know that the operand will be
991       // suitably aligned for the optimized version.
992       if (Misaligned)
993         break;
994       LLVM_FALLTHROUGH;
995     case AtomicExpr::AO__c11_atomic_load:
996     case AtomicExpr::AO__c11_atomic_store:
997     case AtomicExpr::AO__c11_atomic_exchange:
998     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
999     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1000     case AtomicExpr::AO__opencl_atomic_load:
1001     case AtomicExpr::AO__opencl_atomic_store:
1002     case AtomicExpr::AO__opencl_atomic_exchange:
1003     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1004     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1005     case AtomicExpr::AO__atomic_load_n:
1006     case AtomicExpr::AO__atomic_store_n:
1007     case AtomicExpr::AO__atomic_exchange_n:
1008     case AtomicExpr::AO__atomic_compare_exchange_n:
1009       // Only use optimized library calls for sizes for which they exist.
1010       // FIXME: Size == 16 optimized library functions exist too.
1011       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
1012         UseOptimizedLibcall = true;
1013       break;
1014     }
1015 
1016     CallArgList Args;
1017     if (!UseOptimizedLibcall) {
1018       // For non-optimized library calls, the size is the first parameter
1019       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1020                getContext().getSizeType());
1021     }
    // The atomic address is the first or second parameter.
    // The OpenCL atomic library functions only accept pointer arguments to
    // the generic address space.
1025     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1026       if (!E->isOpenCL())
1027         return V;
1028       auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1029       if (AS == LangAS::opencl_generic)
1030         return V;
1031       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1032       auto T = V->getType();
1033       auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
1034 
1035       return getTargetHooks().performAddrSpaceCast(
1036           *this, V, AS, LangAS::opencl_generic, DestType, false);
1037     };
1038 
1039     Args.add(RValue::get(CastToGenericAddrSpace(
1040                  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
1041              getContext().VoidPtrTy);
1042 
1043     std::string LibCallName;
1044     QualType LoweredMemTy =
1045       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
1046     QualType RetTy;
1047     bool HaveRetTy = false;
1048     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
1049     bool PostOpMinMax = false;
1050     switch (E->getOp()) {
1051     case AtomicExpr::AO__c11_atomic_init:
1052     case AtomicExpr::AO__opencl_atomic_init:
1053       llvm_unreachable("Already handled!");
1054 
    // There is only one libcall for compare and exchange, because there is no
    // optimization benefit possible from a libcall version of a weak compare
    // and exchange.
1058     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1059     //                                void *desired, int success, int failure)
1060     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1061     //                                  int success, int failure)
1062     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1063     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1064     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1065     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1066     case AtomicExpr::AO__atomic_compare_exchange:
1067     case AtomicExpr::AO__atomic_compare_exchange_n:
1068       LibCallName = "__atomic_compare_exchange";
1069       RetTy = getContext().BoolTy;
1070       HaveRetTy = true;
1071       Args.add(
1072           RValue::get(CastToGenericAddrSpace(
1073               EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1074           getContext().VoidPtrTy);
1075       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1076                         MemTy, E->getExprLoc(), sizeChars);
1077       Args.add(RValue::get(Order), getContext().IntTy);
1078       Order = OrderFail;
1079       break;
1080     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1081     //                        int order)
1082     // T __atomic_exchange_N(T *mem, T val, int order)
1083     case AtomicExpr::AO__c11_atomic_exchange:
1084     case AtomicExpr::AO__opencl_atomic_exchange:
1085     case AtomicExpr::AO__atomic_exchange_n:
1086     case AtomicExpr::AO__atomic_exchange:
1087       LibCallName = "__atomic_exchange";
1088       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1089                         MemTy, E->getExprLoc(), sizeChars);
1090       break;
1091     // void __atomic_store(size_t size, void *mem, void *val, int order)
1092     // void __atomic_store_N(T *mem, T val, int order)
1093     case AtomicExpr::AO__c11_atomic_store:
1094     case AtomicExpr::AO__opencl_atomic_store:
1095     case AtomicExpr::AO__atomic_store:
1096     case AtomicExpr::AO__atomic_store_n:
1097       LibCallName = "__atomic_store";
1098       RetTy = getContext().VoidTy;
1099       HaveRetTy = true;
1100       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1101                         MemTy, E->getExprLoc(), sizeChars);
1102       break;
1103     // void __atomic_load(size_t size, void *mem, void *return, int order)
1104     // T __atomic_load_N(T *mem, int order)
1105     case AtomicExpr::AO__c11_atomic_load:
1106     case AtomicExpr::AO__opencl_atomic_load:
1107     case AtomicExpr::AO__atomic_load:
1108     case AtomicExpr::AO__atomic_load_n:
1109       LibCallName = "__atomic_load";
1110       break;
1111     // T __atomic_add_fetch_N(T *mem, T val, int order)
1112     // T __atomic_fetch_add_N(T *mem, T val, int order)
1113     case AtomicExpr::AO__atomic_add_fetch:
1114       PostOp = llvm::Instruction::Add;
1115       LLVM_FALLTHROUGH;
1116     case AtomicExpr::AO__c11_atomic_fetch_add:
1117     case AtomicExpr::AO__opencl_atomic_fetch_add:
1118     case AtomicExpr::AO__atomic_fetch_add:
1119       LibCallName = "__atomic_fetch_add";
1120       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1121                         LoweredMemTy, E->getExprLoc(), sizeChars);
1122       break;
1123     // T __atomic_and_fetch_N(T *mem, T val, int order)
1124     // T __atomic_fetch_and_N(T *mem, T val, int order)
1125     case AtomicExpr::AO__atomic_and_fetch:
1126       PostOp = llvm::Instruction::And;
1127       LLVM_FALLTHROUGH;
1128     case AtomicExpr::AO__c11_atomic_fetch_and:
1129     case AtomicExpr::AO__opencl_atomic_fetch_and:
1130     case AtomicExpr::AO__atomic_fetch_and:
1131       LibCallName = "__atomic_fetch_and";
1132       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1133                         MemTy, E->getExprLoc(), sizeChars);
1134       break;
1135     // T __atomic_or_fetch_N(T *mem, T val, int order)
1136     // T __atomic_fetch_or_N(T *mem, T val, int order)
1137     case AtomicExpr::AO__atomic_or_fetch:
1138       PostOp = llvm::Instruction::Or;
1139       LLVM_FALLTHROUGH;
1140     case AtomicExpr::AO__c11_atomic_fetch_or:
1141     case AtomicExpr::AO__opencl_atomic_fetch_or:
1142     case AtomicExpr::AO__atomic_fetch_or:
1143       LibCallName = "__atomic_fetch_or";
1144       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1145                         MemTy, E->getExprLoc(), sizeChars);
1146       break;
1147     // T __atomic_sub_fetch_N(T *mem, T val, int order)
1148     // T __atomic_fetch_sub_N(T *mem, T val, int order)
1149     case AtomicExpr::AO__atomic_sub_fetch:
1150       PostOp = llvm::Instruction::Sub;
1151       LLVM_FALLTHROUGH;
1152     case AtomicExpr::AO__c11_atomic_fetch_sub:
1153     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1154     case AtomicExpr::AO__atomic_fetch_sub:
1155       LibCallName = "__atomic_fetch_sub";
1156       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1157                         LoweredMemTy, E->getExprLoc(), sizeChars);
1158       break;
1159     // T __atomic_xor_fetch_N(T *mem, T val, int order)
1160     // T __atomic_fetch_xor_N(T *mem, T val, int order)
1161     case AtomicExpr::AO__atomic_xor_fetch:
1162       PostOp = llvm::Instruction::Xor;
1163       LLVM_FALLTHROUGH;
1164     case AtomicExpr::AO__c11_atomic_fetch_xor:
1165     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1166     case AtomicExpr::AO__atomic_fetch_xor:
1167       LibCallName = "__atomic_fetch_xor";
1168       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1169                         MemTy, E->getExprLoc(), sizeChars);
1170       break;
1171     case AtomicExpr::AO__atomic_min_fetch:
1172       PostOpMinMax = true;
1173       LLVM_FALLTHROUGH;
1174     case AtomicExpr::AO__c11_atomic_fetch_min:
1175     case AtomicExpr::AO__atomic_fetch_min:
1176     case AtomicExpr::AO__opencl_atomic_fetch_min:
1177       LibCallName = E->getValueType()->isSignedIntegerType()
1178                         ? "__atomic_fetch_min"
1179                         : "__atomic_fetch_umin";
1180       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1181                         LoweredMemTy, E->getExprLoc(), sizeChars);
1182       break;
1183     case AtomicExpr::AO__atomic_max_fetch:
1184       PostOpMinMax = true;
1185       LLVM_FALLTHROUGH;
1186     case AtomicExpr::AO__c11_atomic_fetch_max:
1187     case AtomicExpr::AO__atomic_fetch_max:
1188     case AtomicExpr::AO__opencl_atomic_fetch_max:
1189       LibCallName = E->getValueType()->isSignedIntegerType()
1190                         ? "__atomic_fetch_max"
1191                         : "__atomic_fetch_umax";
1192       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1193                         LoweredMemTy, E->getExprLoc(), sizeChars);
1194       break;
1195     // T __atomic_nand_fetch_N(T *mem, T val, int order)
1196     // T __atomic_fetch_nand_N(T *mem, T val, int order)
1197     case AtomicExpr::AO__atomic_nand_fetch:
1198       PostOp = llvm::Instruction::And; // the NOT is special cased below
1199       LLVM_FALLTHROUGH;
1200     case AtomicExpr::AO__atomic_fetch_nand:
1201       LibCallName = "__atomic_fetch_nand";
1202       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1203                         MemTy, E->getExprLoc(), sizeChars);
1204       break;
1205     }
1206 
1207     if (E->isOpenCL()) {
1208       LibCallName = std::string("__opencl") +
1209           StringRef(LibCallName).drop_front(1).str();
1210 
1211     }
1212     // Optimized functions have the size in their name.
1213     if (UseOptimizedLibcall)
1214       LibCallName += "_" + llvm::utostr(Size);
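    // For example, a 4-byte fetch-add becomes "__atomic_fetch_add_4".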
1215     // By default, assume we return a value of the atomic type.
1216     if (!HaveRetTy) {
1217       if (UseOptimizedLibcall) {
1218         // Value is returned directly.
1219         // The function returns an appropriately sized integer type.
1220         RetTy = getContext().getIntTypeForBitwidth(
1221             getContext().toBits(sizeChars), /*Signed=*/false);
1222       } else {
1223         // Value is returned through parameter before the order.
1224         RetTy = getContext().VoidTy;
1225         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1226                  getContext().VoidPtrTy);
1227       }
1228     }
1229     // order is always the last parameter
1230     Args.add(RValue::get(Order),
1231              getContext().IntTy);
1232     if (E->isOpenCL())
1233       Args.add(RValue::get(Scope), getContext().IntTy);
1234 
1235     // PostOp is only needed for the atomic_*_fetch operations, and
1236     // thus is only needed for and implemented in the
1237     // UseOptimizedLibcall codepath.
1238     assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
1239 
1240     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1241     // The value is returned directly from the libcall.
1242     if (E->isCmpXChg())
1243       return Res;
1244 
1245     // The value is returned directly for optimized libcalls but the expr
1246     // provided an out-param.
1247     if (UseOptimizedLibcall && Res.getScalarVal()) {
1248       llvm::Value *ResVal = Res.getScalarVal();
1249       if (PostOpMinMax) {
1250         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1251         ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
1252                                       E->getValueType()->isSignedIntegerType(),
1253                                       ResVal, LoadVal1);
1254       } else if (PostOp) {
1255         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1256         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1257       }
1258       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1259         ResVal = Builder.CreateNot(ResVal);
1260 
1261       Builder.CreateStore(
1262           ResVal,
1263           Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1264     }
1265 
1266     if (RValTy->isVoidType())
1267       return RValue::get(nullptr);
1268 
1269     return convertTempToRValue(
1270         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1271         RValTy, E->getExprLoc());
1272   }
1273 
1274   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1275                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1276                  E->getOp() == AtomicExpr::AO__atomic_store ||
1277                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1278   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1279                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1280                 E->getOp() == AtomicExpr::AO__atomic_load ||
1281                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1282 
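  // If the memory order is a compile-time constant, emit a single atomic
  // operation with the corresponding LLVM ordering.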
1283   if (isa<llvm::ConstantInt>(Order)) {
1284     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should never get to a case where the ordering isn't a valid C ABI
    // value, but it's hard to enforce that in general.
1287     if (llvm::isValidAtomicOrderingCABI(ord))
1288       switch ((llvm::AtomicOrderingCABI)ord) {
1289       case llvm::AtomicOrderingCABI::relaxed:
1290         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1291                      llvm::AtomicOrdering::Monotonic, Scope);
1292         break;
1293       case llvm::AtomicOrderingCABI::consume:
1294       case llvm::AtomicOrderingCABI::acquire:
1295         if (IsStore)
1296           break; // Avoid crashing on code with undefined behavior
1297         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1298                      llvm::AtomicOrdering::Acquire, Scope);
1299         break;
1300       case llvm::AtomicOrderingCABI::release:
1301         if (IsLoad)
1302           break; // Avoid crashing on code with undefined behavior
1303         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1304                      llvm::AtomicOrdering::Release, Scope);
1305         break;
1306       case llvm::AtomicOrderingCABI::acq_rel:
1307         if (IsLoad || IsStore)
1308           break; // Avoid crashing on code with undefined behavior
1309         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1310                      llvm::AtomicOrdering::AcquireRelease, Scope);
1311         break;
1312       case llvm::AtomicOrderingCABI::seq_cst:
1313         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1314                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1315         break;
1316       }
1317     if (RValTy->isVoidType())
1318       return RValue::get(nullptr);
1319 
1320     return convertTempToRValue(
1321         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1322                                         Dest.getAddressSpace())),
1323         RValTy, E->getExprLoc());
1324   }
1325 
1326   // Long case, when Order isn't obviously constant.
1327 
  // Create all the relevant basic blocks.
1329   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1330                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1331                    *SeqCstBB = nullptr;
1332   MonotonicBB = createBasicBlock("monotonic", CurFn);
1333   if (!IsStore)
1334     AcquireBB = createBasicBlock("acquire", CurFn);
1335   if (!IsLoad)
1336     ReleaseBB = createBasicBlock("release", CurFn);
1337   if (!IsLoad && !IsStore)
1338     AcqRelBB = createBasicBlock("acqrel", CurFn);
1339   SeqCstBB = createBasicBlock("seqcst", CurFn);
1340   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1341 
1342   // Create the switch for the split
1343   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1344   // doesn't matter unless someone is crazy enough to use something that
1345   // doesn't fold to a constant for the ordering.
1346   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1347   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1348 
1349   // Emit all the different atomics
1350   Builder.SetInsertPoint(MonotonicBB);
1351   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1352                llvm::AtomicOrdering::Monotonic, Scope);
1353   Builder.CreateBr(ContBB);
1354   if (!IsStore) {
1355     Builder.SetInsertPoint(AcquireBB);
1356     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1357                  llvm::AtomicOrdering::Acquire, Scope);
1358     Builder.CreateBr(ContBB);
1359     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1360                 AcquireBB);
1361     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1362                 AcquireBB);
1363   }
1364   if (!IsLoad) {
1365     Builder.SetInsertPoint(ReleaseBB);
1366     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1367                  llvm::AtomicOrdering::Release, Scope);
1368     Builder.CreateBr(ContBB);
1369     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1370                 ReleaseBB);
1371   }
1372   if (!IsLoad && !IsStore) {
1373     Builder.SetInsertPoint(AcqRelBB);
1374     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1375                  llvm::AtomicOrdering::AcquireRelease, Scope);
1376     Builder.CreateBr(ContBB);
1377     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1378                 AcqRelBB);
1379   }
1380   Builder.SetInsertPoint(SeqCstBB);
1381   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1382                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1383   Builder.CreateBr(ContBB);
1384   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1385               SeqCstBB);
1386 
1387   // Cleanup and return
1388   Builder.SetInsertPoint(ContBB);
1389   if (RValTy->isVoidType())
1390     return RValue::get(nullptr);
1391 
1392   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1393   return convertTempToRValue(
1394       Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1395                                       Dest.getAddressSpace())),
1396       RValTy, E->getExprLoc());
1397 }
1398 
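/// Cast the given address to a pointer to an integer type whose width matches
/// the atomic's size, preserving the original address space.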
1399 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1400   unsigned addrspace =
1401     cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1402   llvm::IntegerType *ty =
1403     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1404   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1405 }
1406 
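/// Return the address reinterpreted as a pointer to the atomic integer type,
/// first copying the value into an atomic-sized temporary if the source type's
/// size differs from the atomic's size.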
1407 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1408   llvm::Type *Ty = Addr.getElementType();
1409   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1410   if (SourceSizeInBits != AtomicSizeInBits) {
1411     Address Tmp = CreateTempAlloca();
1412     CGF.Builder.CreateMemCpy(Tmp, Addr,
1413                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1414     Addr = Tmp;
1415   }
1416 
1417   return emitCastToAtomicIntPointer(Addr);
1418 }
1419 
1420 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1421                                              AggValueSlot resultSlot,
1422                                              SourceLocation loc,
1423                                              bool asValue) const {
1424   if (LVal.isSimple()) {
1425     if (EvaluationKind == TEK_Aggregate)
1426       return resultSlot.asRValue();
1427 
1428     // Drill into the padding structure if we have one.
1429     if (hasPadding())
1430       addr = CGF.Builder.CreateStructGEP(addr, 0);
1431 
1432     // Otherwise, just convert the temporary to an r-value using the
1433     // normal conversion routine.
1434     return CGF.convertTempToRValue(addr, getValueType(), loc);
1435   }
1436   if (!asValue)
    // For non-simple lvalues, return the whole atomic value loaded from the
    // temporary.
1438     return RValue::get(CGF.Builder.CreateLoad(addr));
1439   if (LVal.isBitField())
1440     return CGF.EmitLoadOfBitfieldLValue(
1441         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1442                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1443   if (LVal.isVectorElt())
1444     return CGF.EmitLoadOfLValue(
1445         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1446                               LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1447   assert(LVal.isExtVectorElt());
1448   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1449       addr, LVal.getExtVectorElts(), LVal.getType(),
1450       LVal.getBaseInfo(), TBAAAccessInfo()));
1451 }
1452 
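/// Convert an integer produced by an atomic operation back into an r-value of
/// the value type (or of the whole atomic type for non-simple lvalues), going
/// through a temporary when a direct cast is not possible.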
1453 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1454                                              AggValueSlot ResultSlot,
1455                                              SourceLocation Loc,
1456                                              bool AsValue) const {
  // Try to avoid going through memory in some easy cases.
1458   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1459   if (getEvaluationKind() == TEK_Scalar &&
1460       (((!LVal.isBitField() ||
1461          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1462         !hasPadding()) ||
1463        !AsValue)) {
1464     auto *ValTy = AsValue
1465                       ? CGF.ConvertTypeForMem(ValueTy)
1466                       : getAtomicAddress().getType()->getPointerElementType();
1467     if (ValTy->isIntegerTy()) {
1468       assert(IntVal->getType() == ValTy && "Different integer types.");
1469       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1470     } else if (ValTy->isPointerTy())
1471       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1472     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1473       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1474   }
1475 
1476   // Create a temporary.  This needs to be big enough to hold the
1477   // atomic integer.
1478   Address Temp = Address::invalid();
1479   bool TempIsVolatile = false;
1480   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1481     assert(!ResultSlot.isIgnored());
1482     Temp = ResultSlot.getAddress();
1483     TempIsVolatile = ResultSlot.isVolatile();
1484   } else {
1485     Temp = CreateTempAlloca();
1486   }
1487 
1488   // Slam the integer into the temporary.
1489   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1490   CGF.Builder.CreateStore(IntVal, CastTemp)
1491       ->setVolatile(TempIsVolatile);
1492 
1493   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1494 }
1495 
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
1497                                        llvm::AtomicOrdering AO, bool) {
1498   // void __atomic_load(size_t size, void *mem, void *return, int order);
1499   CallArgList Args;
1500   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1501   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1502            CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddrForLoaded)),
1504            CGF.getContext().VoidPtrTy);
1505   Args.add(
1506       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1507       CGF.getContext().IntTy);
1508   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1509 }
1510 
1511 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1512                                           bool IsVolatile) {
1513   // Okay, we're doing this natively.
1514   Address Addr = getAtomicAddressAsAtomicIntPointer();
1515   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1516   Load->setAtomic(AO);
1517 
1518   // Other decoration.
1519   if (IsVolatile)
1520     Load->setVolatile(true);
1521   CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1522   return Load;
1523 }
1524 
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
1528 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1529   if (!CGM.getCodeGenOpts().MSVolatile) return false;
1530   AtomicInfo AI(*this, LV);
1531   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1532   // An atomic is inline if we don't need to use a libcall.
1533   bool AtomicIsInline = !AI.shouldUseLibcall();
1534   // MSVC doesn't seem to do this for types wider than a pointer.
1535   if (getContext().getTypeSize(LV.getType()) >
1536       getContext().getTypeSize(getContext().getIntPtrType()))
1537     return false;
1538   return IsVolatile && AtomicIsInline;
1539 }
1540 
1541 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1542                                        AggValueSlot Slot) {
1543   llvm::AtomicOrdering AO;
1544   bool IsVolatile = LV.isVolatileQualified();
1545   if (LV.getType()->isAtomicType()) {
1546     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1547   } else {
1548     AO = llvm::AtomicOrdering::Acquire;
1549     IsVolatile = true;
1550   }
1551   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1552 }
1553 
1554 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1555                                   bool AsValue, llvm::AtomicOrdering AO,
1556                                   bool IsVolatile) {
1557   // Check whether we should use a library call.
1558   if (shouldUseLibcall()) {
1559     Address TempAddr = Address::invalid();
1560     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1561       assert(getEvaluationKind() == TEK_Aggregate);
1562       TempAddr = ResultSlot.getAddress();
1563     } else
1564       TempAddr = CreateTempAlloca();
1565 
1566     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1567 
1568     // Okay, turn that back into the original value or whole atomic (for
1569     // non-simple lvalues) type.
1570     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1571   }
1572 
1573   // Okay, we're doing this natively.
1574   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1575 
1576   // If we're ignoring an aggregate return, don't do anything.
1577   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1578     return RValue::getAggregate(Address::invalid(), false);
1579 
1580   // Okay, turn that back into the original value or atomic (for non-simple
1581   // lvalues) type.
1582   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1583 }
1584 
1585 /// Emit a load from an l-value of atomic type.  Note that the r-value
1586 /// we produce is an r-value of the atomic *value* type.
1587 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1588                                        llvm::AtomicOrdering AO, bool IsVolatile,
1589                                        AggValueSlot resultSlot) {
1590   AtomicInfo Atomics(*this, src);
1591   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1592                                 IsVolatile);
1593 }
1594 
1595 /// Copy an r-value into memory as part of storing to an atomic type.
1596 /// This needs to create a bit-pattern suitable for atomic operations.
1597 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1598   assert(LVal.isSimple());
  // If we have an aggregate r-value, it should already be of the atomic type,
  // which means that the caller is responsible for having zeroed any padding.
  // Just do an aggregate copy of that type.
1602   if (rvalue.isAggregate()) {
1603     LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1604     LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1605                                     getAtomicType());
1606     bool IsVolatile = rvalue.isVolatileQualified() ||
1607                       LVal.isVolatileQualified();
1608     CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1609                           AggValueSlot::DoesNotOverlap, IsVolatile);
1610     return;
1611   }
1612 
  // Okay, otherwise we're copying a scalar or complex value into the buffer.
1614 
1615   // Zero out the buffer if necessary.
1616   emitMemSetZeroIfNecessary();
1617 
1618   // Drill past the padding if present.
1619   LValue TempLVal = projectValue();
1620 
1621   // Okay, store the rvalue in.
1622   if (rvalue.isScalar()) {
1623     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1624   } else {
1625     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1626   }
1627 }
1628 
1629 
1630 /// Materialize an r-value into memory for the purposes of storing it
1631 /// to an atomic type.
1632 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1633   // Aggregate r-values are already in memory, and EmitAtomicStore
1634   // requires them to be values of the atomic type.
1635   if (rvalue.isAggregate())
1636     return rvalue.getAggregateAddress();
1637 
1638   // Otherwise, make a temporary and materialize into it.
1639   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1640   AtomicInfo Atomics(CGF, TempLV);
1641   Atomics.emitCopyIntoMemory(rvalue);
1642   return TempLV.getAddress(CGF);
1643 }
1644 
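/// Convert an r-value into an integer of the atomic width, suitable for use
/// with the native atomic instructions.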
1645 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1646   // If we've got a scalar value of the right size, try to avoid going
1647   // through memory.
1648   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1649     llvm::Value *Value = RVal.getScalarVal();
1650     if (isa<llvm::IntegerType>(Value->getType()))
1651       return CGF.EmitToMemory(Value, ValueTy);
1652     else {
1653       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1654           CGF.getLLVMContext(),
1655           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1656       if (isa<llvm::PointerType>(Value->getType()))
1657         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1658       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1659         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1660     }
1661   }
1662   // Otherwise, we need to go through memory.
1663   // Put the r-value in memory.
1664   Address Addr = materializeRValue(RVal);
1665 
1666   // Cast the temporary to the atomic int type and pull a value out.
1667   Addr = emitCastToAtomicIntPointer(Addr);
1668   return CGF.Builder.CreateLoad(Addr);
1669 }
1670 
1671 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1672     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1673     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
1675   Address Addr = getAtomicAddressAsAtomicIntPointer();
1676   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1677                                                ExpectedVal, DesiredVal,
1678                                                Success, Failure);
1679   // Other decoration.
1680   Inst->setVolatile(LVal.isVolatileQualified());
1681   Inst->setWeak(IsWeak);
1682 
1683   // Okay, turn that back into the original value type.
1684   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1685   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1686   return std::make_pair(PreviousVal, SuccessFailureVal);
1687 }
1688 
1689 llvm::Value *
1690 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1691                                              llvm::Value *DesiredAddr,
1692                                              llvm::AtomicOrdering Success,
1693                                              llvm::AtomicOrdering Failure) {
1694   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1695   // void *desired, int success, int failure);
1696   CallArgList Args;
1697   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1698   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1699            CGF.getContext().VoidPtrTy);
1700   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1701            CGF.getContext().VoidPtrTy);
1702   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1703            CGF.getContext().VoidPtrTy);
1704   Args.add(RValue::get(
1705                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1706            CGF.getContext().IntTy);
1707   Args.add(RValue::get(
1708                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1709            CGF.getContext().IntTy);
1710   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1711                                               CGF.getContext().BoolTy, Args);
1712 
1713   return SuccessFailureRVal.getScalarVal();
1714 }
1715 
1716 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1717     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1718     llvm::AtomicOrdering Failure, bool IsWeak) {
1719   if (isStrongerThan(Failure, Success))
1720     // Don't assert on undefined behavior "failure argument shall be no stronger
1721     // than the success argument".
1722     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1723 
1724   // Check whether we should use a library call.
1725   if (shouldUseLibcall()) {
1726     // Produce a source address.
1727     Address ExpectedAddr = materializeRValue(Expected);
1728     Address DesiredAddr = materializeRValue(Desired);
1729     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1730                                                  DesiredAddr.getPointer(),
1731                                                  Success, Failure);
1732     return std::make_pair(
1733         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1734                                   SourceLocation(), /*AsValue=*/false),
1735         Res);
1736   }
1737 
1738   // If we've got a scalar value of the right size, try to avoid going
1739   // through memory.
1740   auto *ExpectedVal = convertRValueToInt(Expected);
1741   auto *DesiredVal = convertRValueToInt(Desired);
1742   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1743                                          Failure, IsWeak);
1744   return std::make_pair(
1745       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1746                                 SourceLocation(), /*AsValue=*/false),
1747       Res.second);
1748 }
1749 
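/// Apply UpdateOp to the old value and store the result into DesiredAddr,
/// projecting through bit-field, vector-element or ext-vector-element lvalues
/// as needed.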
1750 static void
1751 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1752                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1753                       Address DesiredAddr) {
1754   RValue UpRVal;
1755   LValue AtomicLVal = Atomics.getAtomicLValue();
1756   LValue DesiredLVal;
1757   if (AtomicLVal.isSimple()) {
1758     UpRVal = OldRVal;
1759     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1760   } else {
1761     // Build new lvalue for temp address.
1762     Address Ptr = Atomics.materializeRValue(OldRVal);
1763     LValue UpdateLVal;
1764     if (AtomicLVal.isBitField()) {
1765       UpdateLVal =
1766           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1767                                AtomicLVal.getType(),
1768                                AtomicLVal.getBaseInfo(),
1769                                AtomicLVal.getTBAAInfo());
1770       DesiredLVal =
1771           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1772                                AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1773                                AtomicLVal.getTBAAInfo());
1774     } else if (AtomicLVal.isVectorElt()) {
1775       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1776                                          AtomicLVal.getType(),
1777                                          AtomicLVal.getBaseInfo(),
1778                                          AtomicLVal.getTBAAInfo());
1779       DesiredLVal = LValue::MakeVectorElt(
1780           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1781           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1782     } else {
1783       assert(AtomicLVal.isExtVectorElt());
1784       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1785                                             AtomicLVal.getType(),
1786                                             AtomicLVal.getBaseInfo(),
1787                                             AtomicLVal.getTBAAInfo());
1788       DesiredLVal = LValue::MakeExtVectorElt(
1789           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1790           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1791     }
1792     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1793   }
1794   // Store new value in the corresponding memory area.
1795   RValue NewRVal = UpdateOp(UpRVal);
1796   if (NewRVal.isScalar()) {
1797     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1798   } else {
1799     assert(NewRVal.isComplex());
1800     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1801                            /*isInit=*/false);
1802   }
1803 }
1804 
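/// Perform an atomic read-modify-write via library calls: load the current
/// value, compute the updated value in a temporary, and retry
/// __atomic_compare_exchange until the exchange succeeds.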
1805 void AtomicInfo::EmitAtomicUpdateLibcall(
1806     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1807     bool IsVolatile) {
1808   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1809 
1810   Address ExpectedAddr = CreateTempAlloca();
1811 
1812   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1813   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1814   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1815   CGF.EmitBlock(ContBB);
1816   Address DesiredAddr = CreateTempAlloca();
1817   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1818       requiresMemSetZero(getAtomicAddress().getElementType())) {
1819     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1820     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1821   }
1822   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1823                                            AggValueSlot::ignored(),
1824                                            SourceLocation(), /*AsValue=*/false);
1825   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1826   auto *Res =
1827       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1828                                        DesiredAddr.getPointer(),
1829                                        AO, Failure);
1830   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1831   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1832 }
1833 
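/// Perform an atomic read-modify-write natively: load the current value and
/// loop on cmpxchg, feeding the previous value back through a PHI node until
/// the exchange succeeds.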
1834 void AtomicInfo::EmitAtomicUpdateOp(
1835     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1836     bool IsVolatile) {
1837   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1838 
1839   // Do the atomic load.
1840   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues, perform the compare-and-swap procedure.
1842   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1843   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1844   auto *CurBB = CGF.Builder.GetInsertBlock();
1845   CGF.EmitBlock(ContBB);
1846   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1847                                              /*NumReservedValues=*/2);
1848   PHI->addIncoming(OldVal, CurBB);
1849   Address NewAtomicAddr = CreateTempAlloca();
1850   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1851   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1852       requiresMemSetZero(getAtomicAddress().getElementType())) {
1853     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1854   }
1855   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1856                                            SourceLocation(), /*AsValue=*/false);
1857   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1858   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1859   // Try to write new value using cmpxchg operation.
1860   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1861   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1862   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1863   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1864 }
1865 
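/// Store UpdateRVal into DesiredAddr through the projection matching the
/// original non-simple lvalue (bit-field, vector element or ext-vector
/// element).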
1866 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1867                                   RValue UpdateRVal, Address DesiredAddr) {
1868   LValue AtomicLVal = Atomics.getAtomicLValue();
1869   LValue DesiredLVal;
1870   // Build new lvalue for temp address.
1871   if (AtomicLVal.isBitField()) {
1872     DesiredLVal =
1873         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1874                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1875                              AtomicLVal.getTBAAInfo());
1876   } else if (AtomicLVal.isVectorElt()) {
1877     DesiredLVal =
1878         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1879                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1880                               AtomicLVal.getTBAAInfo());
1881   } else {
1882     assert(AtomicLVal.isExtVectorElt());
1883     DesiredLVal = LValue::MakeExtVectorElt(
1884         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1885         AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1886   }
1887   // Store new value in the corresponding memory area.
1888   assert(UpdateRVal.isScalar());
1889   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1890 }
1891 
1892 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1893                                          RValue UpdateRVal, bool IsVolatile) {
1894   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1895 
1896   Address ExpectedAddr = CreateTempAlloca();
1897 
1898   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1899   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1900   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1901   CGF.EmitBlock(ContBB);
1902   Address DesiredAddr = CreateTempAlloca();
1903   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1904       requiresMemSetZero(getAtomicAddress().getElementType())) {
1905     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1906     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1907   }
1908   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1909   auto *Res =
1910       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1911                                        DesiredAddr.getPointer(),
1912                                        AO, Failure);
1913   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1914   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1915 }
1916 
1917 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1918                                     bool IsVolatile) {
1919   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1920 
1921   // Do the atomic load.
1922   auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues, perform the compare-and-swap procedure.
1924   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1925   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1926   auto *CurBB = CGF.Builder.GetInsertBlock();
1927   CGF.EmitBlock(ContBB);
1928   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1929                                              /*NumReservedValues=*/2);
1930   PHI->addIncoming(OldVal, CurBB);
1931   Address NewAtomicAddr = CreateTempAlloca();
1932   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1933   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1934       requiresMemSetZero(getAtomicAddress().getElementType())) {
1935     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1936   }
1937   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1938   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1939   // Try to write new value using cmpxchg operation.
1940   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1941   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1942   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1943   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1944 }
1945 
1946 void AtomicInfo::EmitAtomicUpdate(
1947     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1948     bool IsVolatile) {
1949   if (shouldUseLibcall()) {
1950     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1951   } else {
1952     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1953   }
1954 }
1955 
1956 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1957                                   bool IsVolatile) {
1958   if (shouldUseLibcall()) {
1959     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1960   } else {
1961     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1962   }
1963 }
1964 
1965 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1966                                       bool isInit) {
1967   bool IsVolatile = lvalue.isVolatileQualified();
1968   llvm::AtomicOrdering AO;
1969   if (lvalue.getType()->isAtomicType()) {
1970     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1971   } else {
1972     AO = llvm::AtomicOrdering::Release;
1973     IsVolatile = true;
1974   }
1975   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1976 }
1977 
1978 /// Emit a store to an l-value of atomic type.
1979 ///
1980 /// Note that the r-value is expected to be an r-value *of the atomic
1981 /// type*; this means that for aggregate r-values, it should include
1982 /// storage for any padding that was necessary.
1983 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1984                                       llvm::AtomicOrdering AO, bool IsVolatile,
1985                                       bool isInit) {
1986   // If this is an aggregate r-value, it should agree in type except
1987   // maybe for address-space qualification.
1988   assert(!rvalue.isAggregate() ||
1989          rvalue.getAggregateAddress().getElementType() ==
1990              dest.getAddress(*this).getElementType());
1991 
1992   AtomicInfo atomics(*this, dest);
1993   LValue LVal = atomics.getAtomicLValue();
1994 
1995   // If this is an initialization, just put the value there normally.
1996   if (LVal.isSimple()) {
1997     if (isInit) {
1998       atomics.emitCopyIntoMemory(rvalue);
1999       return;
2000     }
2001 
2002     // Check whether we should use a library call.
2003     if (atomics.shouldUseLibcall()) {
2004       // Produce a source address.
2005       Address srcAddr = atomics.materializeRValue(rvalue);
2006 
2007       // void __atomic_store(size_t size, void *mem, void *val, int order)
2008       CallArgList args;
2009       args.add(RValue::get(atomics.getAtomicSizeValue()),
2010                getContext().getSizeType());
2011       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
2012                getContext().VoidPtrTy);
2013       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
2014                getContext().VoidPtrTy);
2015       args.add(
2016           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2017           getContext().IntTy);
2018       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2019       return;
2020     }
2021 
2022     // Okay, we're doing this natively.
2023     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
2024 
2025     // Do the atomic store.
2026     Address addr =
2027         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
2028     intValue = Builder.CreateIntCast(
2029         intValue, addr.getElementType(), /*isSigned=*/false);
2030     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
2031 
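    // An atomic store may not have Acquire or AcquireRelease ordering, so
    // drop the acquire component if present.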
2032     if (AO == llvm::AtomicOrdering::Acquire)
2033       AO = llvm::AtomicOrdering::Monotonic;
2034     else if (AO == llvm::AtomicOrdering::AcquireRelease)
2035       AO = llvm::AtomicOrdering::Release;
2036     // Initializations don't need to be atomic.
2037     if (!isInit)
2038       store->setAtomic(AO);
2039 
2040     // Other decoration.
2041     if (IsVolatile)
2042       store->setVolatile(true);
2043     CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2044     return;
2045   }
2046 
2047   // Emit simple atomic update operation.
2048   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2049 }
2050 
/// Emit a compare-and-exchange op for an atomic type.
2053 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2054     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2055     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2056     AggValueSlot Slot) {
2057   // If this is an aggregate r-value, it should agree in type except
2058   // maybe for address-space qualification.
2059   assert(!Expected.isAggregate() ||
2060          Expected.getAggregateAddress().getElementType() ==
2061              Obj.getAddress(*this).getElementType());
2062   assert(!Desired.isAggregate() ||
2063          Desired.getAggregateAddress().getElementType() ==
2064              Obj.getAddress(*this).getElementType());
2065   AtomicInfo Atomics(*this, Obj);
2066 
2067   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2068                                            IsWeak);
2069 }
2070 
2071 void CodeGenFunction::EmitAtomicUpdate(
2072     LValue LVal, llvm::AtomicOrdering AO,
2073     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2074   AtomicInfo Atomics(*this, LVal);
2075   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2076 }
2077 
2078 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2079   AtomicInfo atomics(*this, dest);
2080 
2081   switch (atomics.getEvaluationKind()) {
2082   case TEK_Scalar: {
2083     llvm::Value *value = EmitScalarExpr(init);
2084     atomics.emitCopyIntoMemory(RValue::get(value));
2085     return;
2086   }
2087 
2088   case TEK_Complex: {
2089     ComplexPairTy value = EmitComplexExpr(init);
2090     atomics.emitCopyIntoMemory(RValue::getComplex(value));
2091     return;
2092   }
2093 
2094   case TEK_Aggregate: {
2095     // Fix up the destination if the initializer isn't an expression
2096     // of atomic type.
2097     bool Zeroed = false;
2098     if (!init->getType()->isAtomicType()) {
2099       Zeroed = atomics.emitMemSetZeroIfNecessary();
2100       dest = atomics.projectValue();
2101     }
2102 
2103     // Evaluate the expression directly into the destination.
2104     AggValueSlot slot = AggValueSlot::forLValue(
2105         dest, *this, AggValueSlot::IsNotDestructed,
2106         AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2107         AggValueSlot::DoesNotOverlap,
2108         Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2109 
2110     EmitAggExpr(init, slot);
2111     return;
2112   }
2113   }
2114   llvm_unreachable("bad evaluation kind");
2115 }
2116