//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

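      // Use a runtime library call if the object's alignment is smaller than
      // its size, or if it is wider than the target can handle with inline
      // atomic instructions.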
      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

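/// Issue a call to one of the external atomic helper routines (such as
/// __atomic_load), creating the runtime function declaration on demand.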
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

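/// Zero-fill the atomic object if its in-memory representation might
/// otherwise contain undefined (e.g. padding) bits.  Returns true if a
/// memset was emitted.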
bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

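/// Emit a strong compare-and-exchange with the given success and failure
/// orderings.  On failure, the value observed in memory is stored back into
/// the 'expected' buffer (Val1); the boolean success result is stored to Dest.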
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Note that the LLVM cmpxchg instruction only supports strong
  // compare-and-exchange at the moment.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Old = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Old->setVolatile(E->isVolatile());

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // If the compare failed, branch to the block that stores the observed value
  // back into Expected; otherwise branch straight to the exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        llvm::Value *Dest, llvm::Value *Ptr,
                                        llvm::Value *Val1, llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder,
                      FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *SeqCstBB = 0;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

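/// Emit a single inline (non-libcall) atomic operation with a known ordering:
/// a cmpxchg, an atomic load or store, or an atomicrmw.  Value operands arrive
/// as addresses of temporaries (Val1/Val2) and any result is stored to Dest.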
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *FailureOrder, uint64_t Size,
                         unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

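  // This path is only reached for naturally aligned atomics (EmitAtomicExpr
  // falls back to a library call when Size != Align), so Size is also used as
  // the alignment of the atomic loads and stores emitted below.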
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    emitAtomicCmpXchgFailureSet(CGF, E, Dest, Ptr, Val1, Val2, FailureOrder,
                                Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

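/// Add the value operand of an atomic libcall to the argument list.  The
/// optimized (sized) libcalls take the value directly; the generic ones take
/// a pointer to it as a void*.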
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);
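  // Misaligned atomics and atomics wider than the target's inline limit must
  // go through the library; everything else is emitted inline below.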

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");
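  // If the operation produces a value and the builtin's operands did not
  // supply a result address, materialize the result into a temporary; it is
  // converted back to an r-value of the expression's type on return.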

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
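  // For example, an atomic store that is too large or misaligned for inline
  // lowering ends up (roughly) as a call such as
  //   __atomic_store(sizeof(obj), &obj, &srcTmp, order);
  // following the generic and sized prototypes documented on the cases below.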
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // Only the sized (optimized) library calls exist for these operations,
      // so always use them.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare-and-exchange, because there is no
    // optimization benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

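  // If the ordering is a compile-time constant, emit exactly the one atomic
  // operation it calls for; a dynamically chosen ordering is handled further
  // down by switching over its value.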
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}



/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

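/// Emit the initialization of an l-value of atomic type from the given
/// initializer expression.  The store itself need not be atomic, but any
/// padding in the atomic representation is zeroed where necessary so that the
/// object gets a defined bit pattern.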
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}