//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

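/// Emit a call to one of the libatomic runtime functions (__atomic_load,
/// __atomic_store, __atomic_fetch_add_4, ...).  A rough sketch of how the
/// callers in this file use it (the argument names here are illustrative
/// only):
///
///   CallArgList args;
///   args.add(RValue::get(sizeValue), CGF.getContext().getSizeType());
///   args.add(RValue::get(objPtr), CGF.getContext().VoidPtrTy);
///   args.add(RValue::get(resultPtr), CGF.getContext().VoidPtrTy);
///   args.add(RValue::get(orderValue), CGF.getContext().IntTy);
///   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, args);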
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

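/// Emit a strong compare-and-exchange with fixed success and failure
/// orderings.  Roughly, the emitted IR looks like this (types and value names
/// are illustrative only):
///
///   %expected = load i32* %val1
///   %desired  = load i32* %val2
///   %old = cmpxchg i32* %ptr, i32 %expected, i32 %desired <succ> <fail>
///   %cmp = icmp eq i32 %old, %expected
///   br i1 %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected
///
/// where cmpxchg.store_expected writes %old back to %val1 before falling
/// through to cmpxchg.continue, which stores the i1 result to %dest.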
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Note that the LLVM cmpxchg instruction does not support a weak variant
  // at the moment.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Old = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Old->setVolatile(E->isVolatile());

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
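///
/// If FailureOrder is not a compile-time constant, this expands into a switch
/// over its runtime value, conceptually (a sketch, not the exact IR):
///
///   switch (failure_order) {
///   default:                   // monotonic_fail: cmpxchg ... monotonic
///   case memory_order_consume:
///   case memory_order_acquire: // acquire_fail:   cmpxchg ... acquire
///   case memory_order_seq_cst: // seqcst_fail:    cmpxchg ... seq_cst
///   }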
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        llvm::Value *Dest, llvm::Value *Ptr,
                                        llvm::Value *Val1, llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behavior.
      FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder,
                      FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

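/// Emit a single atomic operation with a known LLVM ordering.  Loads, stores,
/// and compare-exchanges get dedicated instructions; the remaining builtins
/// become an atomicrmw, for example (a sketch; the integer type depends on
/// the operand):
///
///   __c11_atomic_fetch_add(p, n, memory_order_seq_cst)
///     ==>  %old = atomicrmw add i32* %p, i32 %n seq_cst
///
/// The __atomic_*_fetch forms additionally re-apply the operation to the old
/// value before storing the result to Dest.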
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *FailureOrder, uint64_t Size,
                         unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    emitAtomicCmpXchgFailureSet(CGF, E, Dest, Ptr, Val1, Val2, FailureOrder,
                                Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

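/// Lower one of the __c11_atomic_* / __atomic_* builtin expressions.  A rough
/// summary of the strategy: when the object's size matches its alignment and
/// fits within the target's maximum inline atomic width, the operation is
/// emitted inline through EmitAtomicOp (wrapped in a switch when the ordering
/// is not a constant); otherwise we fall back to the libatomic entry points,
/// e.g. an under-aligned or oversized _Atomic object is handled by
/// __atomic_load / __atomic_store / __atomic_compare_exchange with an
/// explicit size argument.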
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *OrderFail = nullptr, *Val1 = nullptr, *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

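    // Libcall selection follows the GNU atomic library spec referenced above:
    // the generic entry points (__atomic_load, __atomic_store,
    // __atomic_exchange, __atomic_compare_exchange) take the object size as
    // their first parameter and work through pointers, while the
    // size-suffixed forms (e.g. __atomic_fetch_add_4) pass and return values
    // directly.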
    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimization benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

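/// Cast a pointer to the atomic object to a pointer to an integer of the
/// atomic width, preserving the address space.  For example, an
/// _Atomic(float)* becomes an i32* so that integer atomic instructions can
/// operate on the underlying storage.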
llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
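///
/// In the non-libcall case this boils down to a sequentially consistent
/// atomic load of the integer-cast address, roughly (an illustrative sketch
/// for a naturally aligned _Atomic(int)):
///
///   %result = load atomic i32* %addr seq_cst, align 4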
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
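///
/// In the non-libcall case the value is reduced to an integer of the atomic
/// width and stored with a sequentially consistent atomic store, roughly (an
/// illustrative sketch for a naturally aligned _Atomic(int)):
///
///   store atomic i32 %value, i32* %addr seq_cst, align 4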
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

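/// Emit the initialization of an object of atomic type, e.g. for
/// __c11_atomic_init(&x, 42) or _Atomic(int) x = 42.  Initialization is not
/// itself an atomic operation; we just copy the value into the value part of
/// the atomic layout, zeroing any padding first where necessary.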
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}