//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

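      // Fall back to the __atomic_* libcalls when the object is misaligned
      // for its size or is too wide for the target to handle with an inline
      // atomic instruction.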
      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
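    ///
    /// For example, on 32-bit x86 a plain 'long long' is 8 bytes wide with
    /// 4-byte alignment, while '_Atomic long long' is typically given 8-byte
    /// alignment but stays 8 bytes wide, so there is no padding even though
    /// the two types are not layout-interchangeable.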
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
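      // A padded atomic is laid out as a struct of the form
      // { value, padding }, so project out the value field.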
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
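///
/// For example, an _Atomic of a 3-byte struct is usually padded out to a
/// 4-byte atomic; the padding byte would otherwise hold an undefined value,
/// which breaks bitwise compare-exchange over the full 4-byte unit.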
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

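// Roughly, an atomic compare-exchange is emitted with the following shape
// (an illustrative sketch, not the exact IR):
//
//   %pair = cmpxchg iN* %ptr, iN %expected, iN %desired succ_ord fail_ord
//   %old  = extractvalue { iN, i1 } %pair, 0
//   %ok   = extractvalue { iN, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
//
// cmpxchg.store_expected:                ; failure: write back what we saw
//   store iN %old, iN* %val1
//   br label %cmpxchg.continue
//
// cmpxchg.continue:                      ; %ok becomes the boolean result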
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the 'expected' and 'desired' operands from the temporaries they
  // were materialized into.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // The instruction returns a pair: Old is the value that was actually in
  // memory, and Cmp is true on success (Old matched Expected) and false on
  // failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // On success, branch straight to the exit point; on failure, fall through
  // to write the observed value back into Expected first.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
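///
/// For a non-constant failure order this emits a switch over its runtime
/// value, e.g. (a sketch, using the C11 ABI ordering values):
///
///   switch i32 %failorder, label %monotonic_fail [
///     i32 1, label %acquire_fail    ; memory_order_consume
///     i32 2, label %acquire_fail    ; memory_order_acquire
///     i32 5, label %seqcst_fail     ; memory_order_seq_cst
///   ]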
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
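  // For example, __atomic_add_fetch lowers to an 'atomicrmw add' (which
  // yields the old value) followed by a plain add to recompute old + val;
  // __atomic_nand_fetch additionally needs the trailing 'not' below.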
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

/// Emit any expression (scalar, complex, or aggregate) into a temporary
/// alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

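/// Add the value operand for an atomic libcall.  The optimized,
/// size-suffixed libcalls (e.g. __atomic_fetch_add_4) take the value
/// directly; the generic, size-parameterised forms take a void* pointing
/// at it instead.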
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
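      // For example, for an atomic 'int *', the C11 fetch_add of 1 advances
      // the pointer by sizeof(int) bytes, whereas the GNU builtin would add
      // exactly one byte.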
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only the size-suffixed library calls exist, so we must
      // always use the optimized form.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
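    // e.g. "__atomic_fetch_add" becomes "__atomic_fetch_add_4" for a
    // 4-byte operand.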
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should never get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
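  // Note that implicit loads of _Atomic l-values are always sequentially
  // consistent; the explicit atomic builtins, which carry their own
  // ordering, are handled in EmitAtomicExpr instead.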
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV =
      CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}