1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the code for emitting atomic operations.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGCall.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenModule.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/Operator.h"
24 
25 using namespace clang;
26 using namespace CodeGen;
27 
28 namespace {
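  /// Helper that gathers the layout facts (value type, sizes, alignments,
  /// evaluation kind) needed to lower operations on an atomic l-value, and
  /// that decides whether they can be done inline or require a libcall.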
29   class AtomicInfo {
30     CodeGenFunction &CGF;
31     QualType AtomicTy;
32     QualType ValueTy;
33     uint64_t AtomicSizeInBits;
34     uint64_t ValueSizeInBits;
35     CharUnits AtomicAlign;
36     CharUnits ValueAlign;
37     CharUnits LValueAlign;
38     TypeEvaluationKind EvaluationKind;
39     bool UseLibcall;
40     LValue LVal;
41     CGBitFieldInfo BFI;
42   public:
43     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45           EvaluationKind(TEK_Scalar), UseLibcall(true) {
46       assert(!lvalue.isGlobalReg());
47       ASTContext &C = CGF.getContext();
48       if (lvalue.isSimple()) {
49         AtomicTy = lvalue.getType();
50         if (auto *ATy = AtomicTy->getAs<AtomicType>())
51           ValueTy = ATy->getValueType();
52         else
53           ValueTy = AtomicTy;
54         EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56         uint64_t ValueAlignInBits;
57         uint64_t AtomicAlignInBits;
58         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59         ValueSizeInBits = ValueTI.Width;
60         ValueAlignInBits = ValueTI.Align;
61 
62         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63         AtomicSizeInBits = AtomicTI.Width;
64         AtomicAlignInBits = AtomicTI.Align;
65 
66         assert(ValueSizeInBits <= AtomicSizeInBits);
67         assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71         if (lvalue.getAlignment().isZero())
72           lvalue.setAlignment(AtomicAlign);
73 
74         LVal = lvalue;
75       } else if (lvalue.isBitField()) {
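        // For bit-fields, widen the access to a storage unit that is aligned
        // to the l-value's alignment and fully contains the bit-field, then
        // rebase the bit-field info on that storage unit.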
76         auto &OrigBFI = lvalue.getBitFieldInfo();
77         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
78         AtomicSizeInBits = C.toBits(
79             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
80                 .RoundUpToAlignment(lvalue.getAlignment()));
81         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
82         auto OffsetInChars =
83             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
84             lvalue.getAlignment();
85         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
86             VoidPtrAddr, OffsetInChars.getQuantity());
87         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
88             VoidPtrAddr,
89             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
90             "atomic_bitfield_base");
91         BFI = OrigBFI;
92         BFI.Offset = Offset;
93         BFI.StorageSize = AtomicSizeInBits;
94         LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
95                                     lvalue.getAlignment());
96       } else if (lvalue.isVectorElt()) {
97         AtomicSizeInBits = C.getTypeSize(lvalue.getType());
98         LVal = lvalue;
99       } else {
100         assert(lvalue.isExtVectorElt());
101         AtomicSizeInBits = C.getTypeSize(lvalue.getType());
102         LVal = lvalue;
103       }
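      // Ask the target whether an atomic access of this size and alignment
      // can be lowered to native instructions; otherwise fall back to the
      // __atomic_* library calls.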
104       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
105           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
106     }
107 
108     QualType getAtomicType() const { return AtomicTy; }
109     QualType getValueType() const { return ValueTy; }
110     CharUnits getAtomicAlignment() const { return AtomicAlign; }
111     CharUnits getValueAlignment() const { return ValueAlign; }
112     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
113     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
114     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
115     bool shouldUseLibcall() const { return UseLibcall; }
116     const LValue &getAtomicLValue() const { return LVal; }
117 
    /// Is the atomic size larger than the size of the underlying value
    /// type?
119     ///
120     /// Note that the absence of padding does not mean that atomic
121     /// objects are completely interchangeable with non-atomic
122     /// objects: we might have promoted the alignment of a type
123     /// without making it bigger.
124     bool hasPadding() const {
125       return (ValueSizeInBits != AtomicSizeInBits);
126     }
127 
128     bool emitMemSetZeroIfNecessary() const;
129 
130     llvm::Value *getAtomicSizeValue() const {
131       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
132       return CGF.CGM.getSize(size);
133     }
134 
135     /// Cast the given pointer to an integer pointer suitable for
136     /// atomic operations.
137     llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;
138 
139     /// Turn an atomic-layout object into an r-value.
140     RValue convertTempToRValue(llvm::Value *addr,
141                                AggValueSlot resultSlot,
142                                SourceLocation loc) const;
143 
    /// \brief Converts an r-value to an integer value.
145     llvm::Value *convertRValueToInt(RValue RVal) const;
146 
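    /// \brief Converts an integer value of the atomic width back into an
    /// r-value of the underlying value type.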
147     RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
148                              SourceLocation Loc) const;
149 
150     /// Copy an atomic r-value into atomic-layout memory.
151     void emitCopyIntoMemory(RValue rvalue) const;
152 
153     /// Project an l-value down to the value field.
154     LValue projectValue() const {
155       assert(LVal.isSimple());
156       llvm::Value *addr = LVal.getAddress();
157       if (hasPadding())
158         addr = CGF.Builder.CreateStructGEP(addr, 0);
159 
160       return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
161                               CGF.getContext(), LVal.getTBAAInfo());
162     }
163 
164     /// Materialize an atomic r-value in atomic-layout memory.
165     llvm::Value *materializeRValue(RValue rvalue) const;
166 
167   private:
168     bool requiresMemSetZero(llvm::Type *type) const;
169   };
170 }
171 
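/// Emit a call to a runtime function from the __atomic_* support library
/// with the given result type and arguments.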
172 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
173                                 StringRef fnName,
174                                 QualType resultType,
175                                 CallArgList &args) {
176   const CGFunctionInfo &fnInfo =
177     CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
178             FunctionType::ExtInfo(), RequiredArgs::All);
179   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
180   llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
181   return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
182 }
183 
184 /// Does a store of the given IR type modify the full expected width?
185 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
186                            uint64_t expectedSize) {
187   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
188 }
189 
190 /// Does the atomic type require memsetting to zero before initialization?
191 ///
192 /// The IR type is provided as a way of making certain queries faster.
193 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
194   // If the atomic type has size padding, we definitely need a memset.
195   if (hasPadding()) return true;
196 
197   // Otherwise, do some simple heuristics to try to avoid it:
198   switch (getEvaluationKind()) {
199   // For scalars and complexes, check whether the store size of the
200   // type uses the full size.
201   case TEK_Scalar:
202     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
203   case TEK_Complex:
204     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
205                            AtomicSizeInBits / 2);
206 
207   // Padding in structs has an undefined bit pattern.  User beware.
208   case TEK_Aggregate:
209     return false;
210   }
211   llvm_unreachable("bad evaluation kind");
212 }
213 
214 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
215   assert(LVal.isSimple());
216   llvm::Value *addr = LVal.getAddress();
217   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
218     return false;
219 
220   CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
221                            AtomicSizeInBits / 8,
222                            LVal.getAlignment().getQuantity());
223   return true;
224 }
225 
226 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
227                               llvm::Value *Dest, llvm::Value *Ptr,
228                               llvm::Value *Val1, llvm::Value *Val2,
229                               uint64_t Size, unsigned Align,
230                               llvm::AtomicOrdering SuccessOrder,
231                               llvm::AtomicOrdering FailureOrder) {
  // Load the expected and desired values; whether the cmpxchg is weak or
  // strong is set on the instruction below.
233   llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
234   Expected->setAlignment(Align);
235   llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
236   Desired->setAlignment(Align);
237 
238   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
239       Ptr, Expected, Desired, SuccessOrder, FailureOrder);
240   Pair->setVolatile(E->isVolatile());
241   Pair->setWeak(IsWeak);
242 
243   // Cmp holds the result of the compare-exchange operation: true on success,
244   // false on failure.
245   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
246   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
247 
248   // This basic block is used to hold the store instruction if the operation
249   // failed.
250   llvm::BasicBlock *StoreExpectedBB =
251       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
252 
253   // This basic block is the exit point of the operation, we should end up
254   // here regardless of whether or not the operation succeeded.
255   llvm::BasicBlock *ContinueBB =
256       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
257 
258   // Update Expected if Expected isn't equal to Old, otherwise branch to the
259   // exit point.
260   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
261 
262   CGF.Builder.SetInsertPoint(StoreExpectedBB);
263   // Update the memory at Expected with Old's value.
264   llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
265   StoreExpected->setAlignment(Align);
266   // Finally, branch to the exit point.
267   CGF.Builder.CreateBr(ContinueBB);
268 
269   CGF.Builder.SetInsertPoint(ContinueBB);
270   // Update the memory at Dest with Cmp's value.
271   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
272   return;
273 }
274 
275 /// Given an ordering required on success, emit all possible cmpxchg
276 /// instructions to cope with the provided (but possibly only dynamically known)
277 /// FailureOrder.
278 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
279                                         bool IsWeak, llvm::Value *Dest,
280                                         llvm::Value *Ptr, llvm::Value *Val1,
281                                         llvm::Value *Val2,
282                                         llvm::Value *FailureOrderVal,
283                                         uint64_t Size, unsigned Align,
284                                         llvm::AtomicOrdering SuccessOrder) {
285   llvm::AtomicOrdering FailureOrder;
286   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
287     switch (FO->getSExtValue()) {
288     default:
289       FailureOrder = llvm::Monotonic;
290       break;
291     case AtomicExpr::AO_ABI_memory_order_consume:
292     case AtomicExpr::AO_ABI_memory_order_acquire:
293       FailureOrder = llvm::Acquire;
294       break;
295     case AtomicExpr::AO_ABI_memory_order_seq_cst:
296       FailureOrder = llvm::SequentiallyConsistent;
297       break;
298     }
299     if (FailureOrder >= SuccessOrder) {
300       // Don't assert on undefined behaviour.
301       FailureOrder =
302         llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
303     }
304     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
305                       SuccessOrder, FailureOrder);
306     return;
307   }
308 
309   // Create all the relevant BB's
310   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
311                    *SeqCstBB = nullptr;
312   MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
313   if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
314     AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
315   if (SuccessOrder == llvm::SequentiallyConsistent)
316     SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
317 
318   llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
319 
320   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
321 
322   // Emit all the different atomics
323 
324   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
325   // doesn't matter unless someone is crazy enough to use something that
326   // doesn't fold to a constant for the ordering.
327   CGF.Builder.SetInsertPoint(MonotonicBB);
328   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
329                     Size, Align, SuccessOrder, llvm::Monotonic);
330   CGF.Builder.CreateBr(ContBB);
331 
332   if (AcquireBB) {
333     CGF.Builder.SetInsertPoint(AcquireBB);
334     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
335                       Size, Align, SuccessOrder, llvm::Acquire);
336     CGF.Builder.CreateBr(ContBB);
337     SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
338                 AcquireBB);
339     SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
340                 AcquireBB);
341   }
342   if (SeqCstBB) {
343     CGF.Builder.SetInsertPoint(SeqCstBB);
344     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
345                       Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
346     CGF.Builder.CreateBr(ContBB);
347     SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
348                 SeqCstBB);
349   }
350 
351   CGF.Builder.SetInsertPoint(ContBB);
352 }
353 
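/// Emit the IR for a single atomic operation with a known, constant memory
/// ordering: compare-exchange and load/store are handled specially, and the
/// remaining builtins map onto an atomicrmw instruction.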
354 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
355                          llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
356                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
357                          uint64_t Size, unsigned Align,
358                          llvm::AtomicOrdering Order) {
359   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
360   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
361 
362   switch (E->getOp()) {
363   case AtomicExpr::AO__c11_atomic_init:
364     llvm_unreachable("Already handled!");
365 
366   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
367     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
368                                 FailureOrder, Size, Align, Order);
369     return;
370   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
371     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
372                                 FailureOrder, Size, Align, Order);
373     return;
374   case AtomicExpr::AO__atomic_compare_exchange:
375   case AtomicExpr::AO__atomic_compare_exchange_n: {
376     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
377       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
378                                   Val1, Val2, FailureOrder, Size, Align, Order);
379     } else {
380       // Create all the relevant BB's
381       llvm::BasicBlock *StrongBB =
382           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
384       llvm::BasicBlock *ContBB =
385           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
386 
387       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
388       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
389 
390       CGF.Builder.SetInsertPoint(StrongBB);
391       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
392                                   FailureOrder, Size, Align, Order);
393       CGF.Builder.CreateBr(ContBB);
394 
395       CGF.Builder.SetInsertPoint(WeakBB);
396       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
397                                   FailureOrder, Size, Align, Order);
398       CGF.Builder.CreateBr(ContBB);
399 
400       CGF.Builder.SetInsertPoint(ContBB);
401     }
402     return;
403   }
404   case AtomicExpr::AO__c11_atomic_load:
405   case AtomicExpr::AO__atomic_load_n:
406   case AtomicExpr::AO__atomic_load: {
407     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
408     Load->setAtomic(Order);
409     Load->setAlignment(Size);
410     Load->setVolatile(E->isVolatile());
411     llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
412     StoreDest->setAlignment(Align);
413     return;
414   }
415 
416   case AtomicExpr::AO__c11_atomic_store:
417   case AtomicExpr::AO__atomic_store:
418   case AtomicExpr::AO__atomic_store_n: {
419     assert(!Dest && "Store does not return a value");
420     llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
421     LoadVal1->setAlignment(Align);
422     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
423     Store->setAtomic(Order);
424     Store->setAlignment(Size);
425     Store->setVolatile(E->isVolatile());
426     return;
427   }
428 
429   case AtomicExpr::AO__c11_atomic_exchange:
430   case AtomicExpr::AO__atomic_exchange_n:
431   case AtomicExpr::AO__atomic_exchange:
432     Op = llvm::AtomicRMWInst::Xchg;
433     break;
434 
435   case AtomicExpr::AO__atomic_add_fetch:
436     PostOp = llvm::Instruction::Add;
437     // Fall through.
438   case AtomicExpr::AO__c11_atomic_fetch_add:
439   case AtomicExpr::AO__atomic_fetch_add:
440     Op = llvm::AtomicRMWInst::Add;
441     break;
442 
443   case AtomicExpr::AO__atomic_sub_fetch:
444     PostOp = llvm::Instruction::Sub;
445     // Fall through.
446   case AtomicExpr::AO__c11_atomic_fetch_sub:
447   case AtomicExpr::AO__atomic_fetch_sub:
448     Op = llvm::AtomicRMWInst::Sub;
449     break;
450 
451   case AtomicExpr::AO__atomic_and_fetch:
452     PostOp = llvm::Instruction::And;
453     // Fall through.
454   case AtomicExpr::AO__c11_atomic_fetch_and:
455   case AtomicExpr::AO__atomic_fetch_and:
456     Op = llvm::AtomicRMWInst::And;
457     break;
458 
459   case AtomicExpr::AO__atomic_or_fetch:
460     PostOp = llvm::Instruction::Or;
461     // Fall through.
462   case AtomicExpr::AO__c11_atomic_fetch_or:
463   case AtomicExpr::AO__atomic_fetch_or:
464     Op = llvm::AtomicRMWInst::Or;
465     break;
466 
467   case AtomicExpr::AO__atomic_xor_fetch:
468     PostOp = llvm::Instruction::Xor;
469     // Fall through.
470   case AtomicExpr::AO__c11_atomic_fetch_xor:
471   case AtomicExpr::AO__atomic_fetch_xor:
472     Op = llvm::AtomicRMWInst::Xor;
473     break;
474 
475   case AtomicExpr::AO__atomic_nand_fetch:
476     PostOp = llvm::Instruction::And;
477     // Fall through.
478   case AtomicExpr::AO__atomic_fetch_nand:
479     Op = llvm::AtomicRMWInst::Nand;
480     break;
481   }
482 
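  // Everything that falls through to here is an atomicrmw: load the operand,
  // emit the instruction, and store the old (or recomputed) value into Dest.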
483   llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
484   LoadVal1->setAlignment(Align);
485   llvm::AtomicRMWInst *RMWI =
486       CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
487   RMWI->setVolatile(E->isVolatile());
488 
489   // For __atomic_*_fetch operations, perform the operation again to
490   // determine the value which was written.
491   llvm::Value *Result = RMWI;
492   if (PostOp)
493     Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
494   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
495     Result = CGF.Builder.CreateNot(Result);
496   llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
497   StoreDest->setAlignment(Align);
498 }
499 
500 // This function emits any expression (scalar, complex, or aggregate)
501 // into a temporary alloca.
502 static llvm::Value *
503 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
504   llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
505   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
506                        /*Init*/ true);
507   return DeclPtr;
508 }
509 
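/// Add the value argument for an atomic libcall: optimized (sized) libcalls
/// take the value directly as an appropriately sized integer, while the
/// generic ones take it by address as a void pointer.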
510 static void
511 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
512                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
513                   SourceLocation Loc, CharUnits SizeInChars) {
514   if (UseOptimizedLibcall) {
515     // Load value and pass it to the function directly.
516     unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
517     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
518     ValTy =
519         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
520     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
521                                                 SizeInBits)->getPointerTo();
522     Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
523                                Align, CGF.getContext().getPointerType(ValTy),
524                                Loc);
525     // Coerce the value into an appropriately sized integer type.
526     Args.add(RValue::get(Val), ValTy);
527   } else {
528     // Non-optimized functions always take a reference.
529     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
530                          CGF.getContext().VoidPtrTy);
531   }
532 }
533 
534 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
535   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
536   QualType MemTy = AtomicTy;
537   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
538     MemTy = AT->getValueType();
539   CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
540   uint64_t Size = sizeChars.getQuantity();
541   CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
542   unsigned Align = alignChars.getQuantity();
543   unsigned MaxInlineWidthInBits =
544     getTarget().getMaxAtomicInlineWidth();
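  // Misaligned atomics, and atomics wider than the target can handle inline,
  // must go through the library.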
545   bool UseLibcall = (Size != Align ||
546                      getContext().toBits(sizeChars) > MaxInlineWidthInBits);
547 
548   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
549               *Val2 = nullptr;
550   llvm::Value *Ptr = EmitScalarExpr(E->getPtr());
551 
552   if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
553     assert(!Dest && "Init does not return a value");
554     LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
555     EmitAtomicInit(E->getVal1(), lvalue);
556     return RValue::get(nullptr);
557   }
558 
559   llvm::Value *Order = EmitScalarExpr(E->getOrder());
560 
561   switch (E->getOp()) {
562   case AtomicExpr::AO__c11_atomic_init:
563     llvm_unreachable("Already handled!");
564 
565   case AtomicExpr::AO__c11_atomic_load:
566   case AtomicExpr::AO__atomic_load_n:
567     break;
568 
569   case AtomicExpr::AO__atomic_load:
570     Dest = EmitScalarExpr(E->getVal1());
571     break;
572 
573   case AtomicExpr::AO__atomic_store:
574     Val1 = EmitScalarExpr(E->getVal1());
575     break;
576 
577   case AtomicExpr::AO__atomic_exchange:
578     Val1 = EmitScalarExpr(E->getVal1());
579     Dest = EmitScalarExpr(E->getVal2());
580     break;
581 
582   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
583   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
584   case AtomicExpr::AO__atomic_compare_exchange_n:
585   case AtomicExpr::AO__atomic_compare_exchange:
586     Val1 = EmitScalarExpr(E->getVal1());
587     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
588       Val2 = EmitScalarExpr(E->getVal2());
589     else
590       Val2 = EmitValToTemp(*this, E->getVal2());
591     OrderFail = EmitScalarExpr(E->getOrderFail());
592     if (E->getNumSubExprs() == 6)
593       IsWeak = EmitScalarExpr(E->getWeak());
594     break;
595 
596   case AtomicExpr::AO__c11_atomic_fetch_add:
597   case AtomicExpr::AO__c11_atomic_fetch_sub:
598     if (MemTy->isPointerType()) {
599       // For pointer arithmetic, we're required to do a bit of math:
600       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
601       // ... but only for the C11 builtins. The GNU builtins expect the
602       // user to multiply by sizeof(T).
603       QualType Val1Ty = E->getVal1()->getType();
604       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
605       CharUnits PointeeIncAmt =
606           getContext().getTypeSizeInChars(MemTy->getPointeeType());
607       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
608       Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
609       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
610       break;
611     }
612     // Fall through.
613   case AtomicExpr::AO__atomic_fetch_add:
614   case AtomicExpr::AO__atomic_fetch_sub:
615   case AtomicExpr::AO__atomic_add_fetch:
616   case AtomicExpr::AO__atomic_sub_fetch:
617   case AtomicExpr::AO__c11_atomic_store:
618   case AtomicExpr::AO__c11_atomic_exchange:
619   case AtomicExpr::AO__atomic_store_n:
620   case AtomicExpr::AO__atomic_exchange_n:
621   case AtomicExpr::AO__c11_atomic_fetch_and:
622   case AtomicExpr::AO__c11_atomic_fetch_or:
623   case AtomicExpr::AO__c11_atomic_fetch_xor:
624   case AtomicExpr::AO__atomic_fetch_and:
625   case AtomicExpr::AO__atomic_fetch_or:
626   case AtomicExpr::AO__atomic_fetch_xor:
627   case AtomicExpr::AO__atomic_fetch_nand:
628   case AtomicExpr::AO__atomic_and_fetch:
629   case AtomicExpr::AO__atomic_or_fetch:
630   case AtomicExpr::AO__atomic_xor_fetch:
631   case AtomicExpr::AO__atomic_nand_fetch:
632     Val1 = EmitValToTemp(*this, E->getVal1());
633     break;
634   }
635 
636   QualType RValTy = E->getType().getUnqualifiedType();
637 
638   auto GetDest = [&] {
639     if (!RValTy->isVoidType() && !Dest) {
640       Dest = CreateMemTemp(RValTy, ".atomicdst");
641     }
642     return Dest;
643   };
644 
645   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
646   if (UseLibcall) {
647     bool UseOptimizedLibcall = false;
648     switch (E->getOp()) {
649     case AtomicExpr::AO__c11_atomic_fetch_add:
650     case AtomicExpr::AO__atomic_fetch_add:
651     case AtomicExpr::AO__c11_atomic_fetch_and:
652     case AtomicExpr::AO__atomic_fetch_and:
653     case AtomicExpr::AO__c11_atomic_fetch_or:
654     case AtomicExpr::AO__atomic_fetch_or:
655     case AtomicExpr::AO__c11_atomic_fetch_sub:
656     case AtomicExpr::AO__atomic_fetch_sub:
657     case AtomicExpr::AO__c11_atomic_fetch_xor:
658     case AtomicExpr::AO__atomic_fetch_xor:
659       // For these, only library calls for certain sizes exist.
660       UseOptimizedLibcall = true;
661       break;
662     default:
663       // Only use optimized library calls for sizes for which they exist.
664       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
665         UseOptimizedLibcall = true;
666       break;
667     }
668 
669     CallArgList Args;
670     if (!UseOptimizedLibcall) {
671       // For non-optimized library calls, the size is the first parameter
672       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
673                getContext().getSizeType());
674     }
675     // Atomic address is the first or second parameter
676     Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
677 
678     std::string LibCallName;
679     QualType LoweredMemTy =
680       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
681     QualType RetTy;
682     bool HaveRetTy = false;
683     switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
685     // optimisation benefit possible from a libcall version of a weak compare
686     // and exchange.
687     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
688     //                                void *desired, int success, int failure)
689     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
690     //                                  int success, int failure)
691     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
692     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
693     case AtomicExpr::AO__atomic_compare_exchange:
694     case AtomicExpr::AO__atomic_compare_exchange_n:
695       LibCallName = "__atomic_compare_exchange";
696       RetTy = getContext().BoolTy;
697       HaveRetTy = true;
698       Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
699       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
700                         E->getExprLoc(), sizeChars);
701       Args.add(RValue::get(Order), getContext().IntTy);
702       Order = OrderFail;
703       break;
704     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
705     //                        int order)
706     // T __atomic_exchange_N(T *mem, T val, int order)
707     case AtomicExpr::AO__c11_atomic_exchange:
708     case AtomicExpr::AO__atomic_exchange_n:
709     case AtomicExpr::AO__atomic_exchange:
710       LibCallName = "__atomic_exchange";
711       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
712                         E->getExprLoc(), sizeChars);
713       break;
714     // void __atomic_store(size_t size, void *mem, void *val, int order)
715     // void __atomic_store_N(T *mem, T val, int order)
716     case AtomicExpr::AO__c11_atomic_store:
717     case AtomicExpr::AO__atomic_store:
718     case AtomicExpr::AO__atomic_store_n:
719       LibCallName = "__atomic_store";
720       RetTy = getContext().VoidTy;
721       HaveRetTy = true;
722       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
723                         E->getExprLoc(), sizeChars);
724       break;
725     // void __atomic_load(size_t size, void *mem, void *return, int order)
726     // T __atomic_load_N(T *mem, int order)
727     case AtomicExpr::AO__c11_atomic_load:
728     case AtomicExpr::AO__atomic_load:
729     case AtomicExpr::AO__atomic_load_n:
730       LibCallName = "__atomic_load";
731       break;
732     // T __atomic_fetch_add_N(T *mem, T val, int order)
733     case AtomicExpr::AO__c11_atomic_fetch_add:
734     case AtomicExpr::AO__atomic_fetch_add:
735       LibCallName = "__atomic_fetch_add";
736       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
737                         E->getExprLoc(), sizeChars);
738       break;
739     // T __atomic_fetch_and_N(T *mem, T val, int order)
740     case AtomicExpr::AO__c11_atomic_fetch_and:
741     case AtomicExpr::AO__atomic_fetch_and:
742       LibCallName = "__atomic_fetch_and";
743       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
744                         E->getExprLoc(), sizeChars);
745       break;
746     // T __atomic_fetch_or_N(T *mem, T val, int order)
747     case AtomicExpr::AO__c11_atomic_fetch_or:
748     case AtomicExpr::AO__atomic_fetch_or:
749       LibCallName = "__atomic_fetch_or";
750       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
751                         E->getExprLoc(), sizeChars);
752       break;
753     // T __atomic_fetch_sub_N(T *mem, T val, int order)
754     case AtomicExpr::AO__c11_atomic_fetch_sub:
755     case AtomicExpr::AO__atomic_fetch_sub:
756       LibCallName = "__atomic_fetch_sub";
757       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
758                         E->getExprLoc(), sizeChars);
759       break;
760     // T __atomic_fetch_xor_N(T *mem, T val, int order)
761     case AtomicExpr::AO__c11_atomic_fetch_xor:
762     case AtomicExpr::AO__atomic_fetch_xor:
763       LibCallName = "__atomic_fetch_xor";
764       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
765                         E->getExprLoc(), sizeChars);
766       break;
767     default: return EmitUnsupportedRValue(E, "atomic library call");
768     }
769 
770     // Optimized functions have the size in their name.
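    // e.g. a 4-byte __atomic_fetch_add becomes a call to __atomic_fetch_add_4.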
771     if (UseOptimizedLibcall)
772       LibCallName += "_" + llvm::utostr(Size);
773     // By default, assume we return a value of the atomic type.
774     if (!HaveRetTy) {
775       if (UseOptimizedLibcall) {
776         // Value is returned directly.
777         // The function returns an appropriately sized integer type.
778         RetTy = getContext().getIntTypeForBitwidth(
779             getContext().toBits(sizeChars), /*Signed=*/false);
780       } else {
781         // Value is returned through parameter before the order.
782         RetTy = getContext().VoidTy;
783         Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
784       }
785     }
786     // order is always the last parameter
787     Args.add(RValue::get(Order),
788              getContext().IntTy);
789 
790     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
791     // The value is returned directly from the libcall.
792     if (HaveRetTy && !RetTy->isVoidType())
793       return Res;
794     // The value is returned via an explicit out param.
795     if (RetTy->isVoidType())
796       return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls, but the caller
    // expects an out-param.
799     if (UseOptimizedLibcall) {
800       llvm::Value *ResVal = Res.getScalarVal();
801       llvm::StoreInst *StoreDest = Builder.CreateStore(
802           ResVal,
803           Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
804       StoreDest->setAlignment(Align);
805     }
806     return convertTempToRValue(Dest, RValTy, E->getExprLoc());
807   }
808 
809   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
810                  E->getOp() == AtomicExpr::AO__atomic_store ||
811                  E->getOp() == AtomicExpr::AO__atomic_store_n;
812   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
813                 E->getOp() == AtomicExpr::AO__atomic_load ||
814                 E->getOp() == AtomicExpr::AO__atomic_load_n;
815 
816   llvm::Type *ITy =
817       llvm::IntegerType::get(getLLVMContext(), Size * 8);
818   llvm::Value *OrigDest = GetDest();
819   Ptr = Builder.CreateBitCast(
820       Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
821   if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
822   if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
823   if (Dest && !E->isCmpXChg())
824     Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());
825 
826   if (isa<llvm::ConstantInt>(Order)) {
827     int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
828     switch (ord) {
829     case AtomicExpr::AO_ABI_memory_order_relaxed:
830       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
831                    Size, Align, llvm::Monotonic);
832       break;
833     case AtomicExpr::AO_ABI_memory_order_consume:
834     case AtomicExpr::AO_ABI_memory_order_acquire:
835       if (IsStore)
836         break; // Avoid crashing on code with undefined behavior
837       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
838                    Size, Align, llvm::Acquire);
839       break;
840     case AtomicExpr::AO_ABI_memory_order_release:
841       if (IsLoad)
842         break; // Avoid crashing on code with undefined behavior
843       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
844                    Size, Align, llvm::Release);
845       break;
846     case AtomicExpr::AO_ABI_memory_order_acq_rel:
847       if (IsLoad || IsStore)
848         break; // Avoid crashing on code with undefined behavior
849       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
850                    Size, Align, llvm::AcquireRelease);
851       break;
852     case AtomicExpr::AO_ABI_memory_order_seq_cst:
853       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
854                    Size, Align, llvm::SequentiallyConsistent);
855       break;
856     default: // invalid order
857       // We should not ever get here normally, but it's hard to
858       // enforce that in general.
859       break;
860     }
861     if (RValTy->isVoidType())
862       return RValue::get(nullptr);
863     return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
864   }
865 
866   // Long case, when Order isn't obviously constant.
867 
868   // Create all the relevant BB's
869   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
870                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
871                    *SeqCstBB = nullptr;
872   MonotonicBB = createBasicBlock("monotonic", CurFn);
873   if (!IsStore)
874     AcquireBB = createBasicBlock("acquire", CurFn);
875   if (!IsLoad)
876     ReleaseBB = createBasicBlock("release", CurFn);
877   if (!IsLoad && !IsStore)
878     AcqRelBB = createBasicBlock("acqrel", CurFn);
879   SeqCstBB = createBasicBlock("seqcst", CurFn);
880   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
881 
882   // Create the switch for the split
883   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
884   // doesn't matter unless someone is crazy enough to use something that
885   // doesn't fold to a constant for the ordering.
886   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
887   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
888 
889   // Emit all the different atomics
890   Builder.SetInsertPoint(MonotonicBB);
891   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
892                Size, Align, llvm::Monotonic);
893   Builder.CreateBr(ContBB);
894   if (!IsStore) {
895     Builder.SetInsertPoint(AcquireBB);
896     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
897                  Size, Align, llvm::Acquire);
898     Builder.CreateBr(ContBB);
899     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
900                 AcquireBB);
901     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
902                 AcquireBB);
903   }
904   if (!IsLoad) {
905     Builder.SetInsertPoint(ReleaseBB);
906     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
907                  Size, Align, llvm::Release);
908     Builder.CreateBr(ContBB);
909     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
910                 ReleaseBB);
911   }
912   if (!IsLoad && !IsStore) {
913     Builder.SetInsertPoint(AcqRelBB);
914     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
915                  Size, Align, llvm::AcquireRelease);
916     Builder.CreateBr(ContBB);
917     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
918                 AcqRelBB);
919   }
920   Builder.SetInsertPoint(SeqCstBB);
921   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
922                Size, Align, llvm::SequentiallyConsistent);
923   Builder.CreateBr(ContBB);
924   SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
925               SeqCstBB);
926 
927   // Cleanup and return
928   Builder.SetInsertPoint(ContBB);
929   if (RValTy->isVoidType())
930     return RValue::get(nullptr);
931   return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
932 }
933 
934 llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
935   unsigned addrspace =
936     cast<llvm::PointerType>(addr->getType())->getAddressSpace();
937   llvm::IntegerType *ty =
938     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
939   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
940 }
941 
942 RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
943                                        AggValueSlot resultSlot,
944                                        SourceLocation loc) const {
945   if (LVal.isSimple()) {
946     if (EvaluationKind == TEK_Aggregate)
947       return resultSlot.asRValue();
948 
949     // Drill into the padding structure if we have one.
950     if (hasPadding())
951       addr = CGF.Builder.CreateStructGEP(addr, 0);
952 
953     // Otherwise, just convert the temporary to an r-value using the
954     // normal conversion routine.
955     return CGF.convertTempToRValue(addr, getValueType(), loc);
956   } else if (LVal.isBitField())
957     return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
958         addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
959   else if (LVal.isVectorElt())
960     return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
961                                                       LVal.getType(),
962                                                       LVal.getAlignment()),
963                                 loc);
964   assert(LVal.isExtVectorElt());
965   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
966       addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
967 }
968 
969 RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
970                                      AggValueSlot ResultSlot,
971                                      SourceLocation Loc) const {
972   assert(LVal.isSimple());
  // Try to avoid going through memory in the easy cases.
974   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
975   if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
976     auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
977     if (ValTy->isIntegerTy()) {
978       assert(IntVal->getType() == ValTy && "Different integer types.");
979       return RValue::get(IntVal);
980     } else if (ValTy->isPointerTy())
981       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
982     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
983       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
984   }
985 
986   // Create a temporary.  This needs to be big enough to hold the
987   // atomic integer.
988   llvm::Value *Temp;
989   bool TempIsVolatile = false;
990   CharUnits TempAlignment;
991   if (getEvaluationKind() == TEK_Aggregate) {
992     assert(!ResultSlot.isIgnored());
993     Temp = ResultSlot.getAddr();
994     TempAlignment = getValueAlignment();
995     TempIsVolatile = ResultSlot.isVolatile();
996   } else {
997     Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
998     TempAlignment = getAtomicAlignment();
999   }
1000 
1001   // Slam the integer into the temporary.
1002   llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
1003   CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
1004       ->setVolatile(TempIsVolatile);
1005 
1006   return convertTempToRValue(Temp, ResultSlot, Loc);
1007 }
1008 
1009 /// Emit a load from an l-value of atomic type.  Note that the r-value
1010 /// we produce is an r-value of the atomic *value* type.
1011 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1012                                        AggValueSlot resultSlot) {
1013   AtomicInfo atomics(*this, src);
1014   LValue LVal = atomics.getAtomicLValue();
1015   llvm::Value *SrcAddr = nullptr;
1016   llvm::AllocaInst *NonSimpleTempAlloca = nullptr;
1017   if (LVal.isSimple())
1018     SrcAddr = LVal.getAddress();
1019   else {
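    // For bit-field, vector element, and ext-vector element l-values, load
    // the containing storage and extract the result from a temporary.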
1020     if (LVal.isBitField())
1021       SrcAddr = LVal.getBitFieldAddr();
1022     else if (LVal.isVectorElt())
1023       SrcAddr = LVal.getVectorAddr();
1024     else {
1025       assert(LVal.isExtVectorElt());
1026       SrcAddr = LVal.getExtVectorAddr();
1027     }
1028     NonSimpleTempAlloca = CreateTempAlloca(
1029         SrcAddr->getType()->getPointerElementType(), "atomic-load-temp");
    NonSimpleTempAlloca->setAlignment(src.getAlignment().getQuantity());
1031   }
1032 
1033   // Check whether we should use a library call.
1034   if (atomics.shouldUseLibcall()) {
1035     llvm::Value *tempAddr;
1036     if (LVal.isSimple()) {
1037       if (!resultSlot.isIgnored()) {
1038         assert(atomics.getEvaluationKind() == TEK_Aggregate);
1039         tempAddr = resultSlot.getAddr();
1040       } else
1041         tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
1042     } else
1043       tempAddr = NonSimpleTempAlloca;
1044 
1045     // void __atomic_load(size_t size, void *mem, void *return, int order);
1046     CallArgList args;
1047     args.add(RValue::get(atomics.getAtomicSizeValue()),
1048              getContext().getSizeType());
1049     args.add(RValue::get(EmitCastToVoidPtr(SrcAddr)), getContext().VoidPtrTy);
1050     args.add(RValue::get(EmitCastToVoidPtr(tempAddr)), getContext().VoidPtrTy);
1051     args.add(RValue::get(llvm::ConstantInt::get(
1052                  IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
1053              getContext().IntTy);
1054     emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);
1055 
1056     // Produce the r-value.
1057     return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
1058   }
1059 
1060   // Okay, we're doing this natively.
1061   llvm::Value *addr = atomics.emitCastToAtomicIntPointer(SrcAddr);
1062   llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
1063   load->setAtomic(llvm::SequentiallyConsistent);
1064 
1065   // Other decoration.
1066   load->setAlignment(src.getAlignment().getQuantity());
1067   if (src.isVolatileQualified())
1068     load->setVolatile(true);
1069   if (src.getTBAAInfo())
1070     CGM.DecorateInstruction(load, src.getTBAAInfo());
1071 
1072   // If we're ignoring an aggregate return, don't do anything.
1073   if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
1074     return RValue::getAggregate(nullptr, false);
1075 
1076   // Okay, turn that back into the original value type.
1077   if (src.isSimple())
1078     return atomics.convertIntToValue(load, resultSlot, loc);
1079 
1080   auto *IntAddr = atomics.emitCastToAtomicIntPointer(NonSimpleTempAlloca);
1081   Builder.CreateAlignedStore(load, IntAddr, src.getAlignment().getQuantity());
1082   return atomics.convertTempToRValue(NonSimpleTempAlloca, resultSlot, loc);
1083 }
1084 
1087 /// Copy an r-value into memory as part of storing to an atomic type.
1088 /// This needs to create a bit-pattern suitable for atomic operations.
1089 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1090   assert(LVal.isSimple());
1091   // If we have an r-value, the rvalue should be of the atomic type,
1092   // which means that the caller is responsible for having zeroed
1093   // any padding.  Just do an aggregate copy of that type.
1094   if (rvalue.isAggregate()) {
1095     CGF.EmitAggregateCopy(LVal.getAddress(),
1096                           rvalue.getAggregateAddr(),
1097                           getAtomicType(),
1098                           (rvalue.isVolatileQualified()
1099                            || LVal.isVolatileQualified()),
1100                           LVal.getAlignment());
1101     return;
1102   }
1103 
1104   // Okay, otherwise we're copying stuff.
1105 
1106   // Zero out the buffer if necessary.
1107   emitMemSetZeroIfNecessary();
1108 
1109   // Drill past the padding if present.
1110   LValue TempLVal = projectValue();
1111 
1112   // Okay, store the rvalue in.
1113   if (rvalue.isScalar()) {
1114     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1115   } else {
1116     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1117   }
1118 }
1119 
1120 
1121 /// Materialize an r-value into memory for the purposes of storing it
1122 /// to an atomic type.
1123 llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
1124   // Aggregate r-values are already in memory, and EmitAtomicStore
1125   // requires them to be values of the atomic type.
1126   if (rvalue.isAggregate())
1127     return rvalue.getAggregateAddr();
1128 
1129   // Otherwise, make a temporary and materialize into it.
1130   llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
1131   LValue tempLV =
1132       CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
1133   AtomicInfo Atomics(CGF, tempLV);
1134   Atomics.emitCopyIntoMemory(rvalue);
1135   return temp;
1136 }
1137 
1138 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1139   // If we've got a scalar value of the right size, try to avoid going
1140   // through memory.
1141   if (RVal.isScalar() && !hasPadding()) {
1142     llvm::Value *Value = RVal.getScalarVal();
1143     if (isa<llvm::IntegerType>(Value->getType()))
1144       return Value;
1145     else {
1146       llvm::IntegerType *InputIntTy =
1147           llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
1148       if (isa<llvm::PointerType>(Value->getType()))
1149         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1150       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1151         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1152     }
1153   }
1154   // Otherwise, we need to go through memory.
1155   // Put the r-value in memory.
1156   llvm::Value *Addr = materializeRValue(RVal);
1157 
1158   // Cast the temporary to the atomic int type and pull a value out.
1159   Addr = emitCastToAtomicIntPointer(Addr);
1160   return CGF.Builder.CreateAlignedLoad(Addr,
1161                                        getAtomicAlignment().getQuantity());
1162 }
1163 
1164 /// Emit a store to an l-value of atomic type.
1165 ///
1166 /// Note that the r-value is expected to be an r-value *of the atomic
1167 /// type*; this means that for aggregate r-values, it should include
1168 /// storage for any padding that was necessary.
1169 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
1170   // If this is an aggregate r-value, it should agree in type except
1171   // maybe for address-space qualification.
1172   assert(!rvalue.isAggregate() ||
1173          rvalue.getAggregateAddr()->getType()->getPointerElementType()
1174            == dest.getAddress()->getType()->getPointerElementType());
1175 
1176   AtomicInfo atomics(*this, dest);
1177 
1178   // If this is an initialization, just put the value there normally.
1179   if (isInit) {
1180     atomics.emitCopyIntoMemory(rvalue);
1181     return;
1182   }
1183 
1184   // Check whether we should use a library call.
1185   if (atomics.shouldUseLibcall()) {
1186     // Produce a source address.
1187     llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
1188 
1189     // void __atomic_store(size_t size, void *mem, void *val, int order)
1190     CallArgList args;
1191     args.add(RValue::get(atomics.getAtomicSizeValue()),
1192              getContext().getSizeType());
1193     args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
1194              getContext().VoidPtrTy);
1195     args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
1196              getContext().VoidPtrTy);
1197     args.add(RValue::get(llvm::ConstantInt::get(
1198                  IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
1199              getContext().IntTy);
1200     emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1201     return;
1202   }
1203 
1204   // Okay, we're doing this natively.
1205   llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1206 
1207   // Do the atomic store.
1208   llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
1209   llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1210 
1211   // Initializations don't need to be atomic.
1212   if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);
1213 
1214   // Other decoration.
1215   store->setAlignment(dest.getAlignment().getQuantity());
1216   if (dest.isVolatileQualified())
1217     store->setVolatile(true);
1218   if (dest.getTBAAInfo())
1219     CGM.DecorateInstruction(store, dest.getTBAAInfo());
1220 }
1221 
/// Emit a compare-and-exchange operation for an atomic type.
///
/// Returns a pair of the value previously stored in the object and a boolean
/// success flag.
1224 std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
1225     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1226     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1227     AggValueSlot Slot) {
1228   // If this is an aggregate r-value, it should agree in type except
1229   // maybe for address-space qualification.
1230   assert(!Expected.isAggregate() ||
1231          Expected.getAggregateAddr()->getType()->getPointerElementType() ==
1232              Obj.getAddress()->getType()->getPointerElementType());
1233   assert(!Desired.isAggregate() ||
1234          Desired.getAggregateAddr()->getType()->getPointerElementType() ==
1235              Obj.getAddress()->getType()->getPointerElementType());
1236   AtomicInfo Atomics(*this, Obj);
1237 
1238   if (Failure >= Success)
1239     // Don't assert on undefined behavior.
1240     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1241 
1242   auto Alignment = Atomics.getValueAlignment();
1243   // Check whether we should use a library call.
1244   if (Atomics.shouldUseLibcall()) {
1245     auto *ExpectedAddr = Atomics.materializeRValue(Expected);
1246     // Produce a source address.
1247     auto *DesiredAddr = Atomics.materializeRValue(Desired);
1248     // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1249     // void *desired, int success, int failure);
1250     CallArgList Args;
1251     Args.add(RValue::get(Atomics.getAtomicSizeValue()),
1252              getContext().getSizeType());
1253     Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
1254              getContext().VoidPtrTy);
1255     Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
1256              getContext().VoidPtrTy);
1257     Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
1258              getContext().VoidPtrTy);
1259     Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
1260              getContext().IntTy);
1261     Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
1262              getContext().IntTy);
1263     auto SuccessFailureRVal = emitAtomicLibcall(
1264         *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
1265     auto *PreviousVal =
1266         Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
1267     return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
1268   }
1269 
1270   // If we've got a scalar value of the right size, try to avoid going
1271   // through memory.
1272   auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
1273   auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);
1274 
1275   // Do the atomic store.
1276   auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
1277   auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
1278                                           Success, Failure);
1279   // Other decoration.
1280   Inst->setVolatile(Obj.isVolatileQualified());
1281   Inst->setWeak(IsWeak);
1282 
1283   // Okay, turn that back into the original value type.
1284   auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1285   auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1286   return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
1287                         RValue::get(SuccessFailureVal));
1288 }
1289 
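/// Emit the initialization of an l-value of atomic type from the given
/// expression. Initialization does not need to be atomic, so the value is
/// copied directly into the storage, zeroing any padding when required.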
1290 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
1291   AtomicInfo atomics(*this, dest);
1292 
1293   switch (atomics.getEvaluationKind()) {
1294   case TEK_Scalar: {
1295     llvm::Value *value = EmitScalarExpr(init);
1296     atomics.emitCopyIntoMemory(RValue::get(value));
1297     return;
1298   }
1299 
1300   case TEK_Complex: {
1301     ComplexPairTy value = EmitComplexExpr(init);
1302     atomics.emitCopyIntoMemory(RValue::getComplex(value));
1303     return;
1304   }
1305 
1306   case TEK_Aggregate: {
1307     // Fix up the destination if the initializer isn't an expression
1308     // of atomic type.
1309     bool Zeroed = false;
1310     if (!init->getType()->isAtomicType()) {
1311       Zeroed = atomics.emitMemSetZeroIfNecessary();
1312       dest = atomics.projectValue();
1313     }
1314 
1315     // Evaluate the expression directly into the destination.
1316     AggValueSlot slot = AggValueSlot::forLValue(dest,
1317                                         AggValueSlot::IsNotDestructed,
1318                                         AggValueSlot::DoesNotNeedGCBarriers,
1319                                         AggValueSlot::IsNotAliased,
1320                                         Zeroed ? AggValueSlot::IsZeroed :
1321                                                  AggValueSlot::IsNotZeroed);
1322 
1323     EmitAggExpr(init, slot);
1324     return;
1325   }
1326   }
1327   llvm_unreachable("bad evaluation kind");
1328 }
1329