//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }
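
    // Illustrative note (not a complete specification): UseLibcall ends up
    // true whenever the atomic object is wider than the target's maximum
    // inline atomic width, or wider than the alignment we can guarantee for
    // the l-value, so e.g. an oversized _Atomic struct is lowered through
    // the __atomic_* runtime functions rather than native instructions.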

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

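/// Emit a call to one of the __atomic_* runtime library functions (for
/// example "__atomic_load"), arranging the already-built argument list with
/// the default free-function calling convention.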
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
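///
/// For example (illustrative, target-dependent): a floating-point type whose
/// store size is smaller than its padded atomic size leaves trailing bytes
/// undefined, so those bytes must be zeroed for byte-wise atomic compares to
/// behave predictably.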
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

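// Emit a (strong) compare-and-exchange and materialize its C/C++-level
// results: when the exchange fails, the value actually observed is written
// back to the 'expected' buffer (Val1), and in either case the boolean
// success flag is stored to Dest.  The control flow produced is, roughly:
//
//   entry --[failure]--> cmpxchg.store_expected --> cmpxchg.continue
//     \-------------------[success]-------------------^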
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Note that the LLVM cmpxchg instruction doesn't support weak cmpxchg, at
  // least at the moment.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Old = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Old->setVolatile(E->isVolatile());

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // If the exchange failed, update Expected with the value that was actually
  // observed (Old); otherwise, branch straight to the exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        llvm::Value *Dest, llvm::Value *Ptr,
                                        llvm::Value *Val1, llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder,
                      FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *SeqCstBB = 0;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

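// Emit a single atomic operation with a statically known LLVM ordering.  The
// RMW builtins map onto a single atomicrmw instruction; for the GNU
// __atomic_<op>_fetch forms the same operation is re-applied to the result
// (PostOp) to recover the value that was written, so that, for example,
// __atomic_add_fetch becomes (roughly) an 'atomicrmw add' followed by an
// ordinary 'add'.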
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *FailureOrder, uint64_t Size,
                         unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    emitAtomicCmpXchgFailureSet(CGF, E, Dest, Ptr, Val1, Val2, FailureOrder,
                                Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
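
    // Illustrative example: with UseOptimizedLibcall, a 4-byte fetch-add is
    // emitted as a call to __atomic_fetch_add_4 taking the value directly,
    // whereas an oversized or oddly sized atomic falls back to the generic,
    // size_t-prefixed entry points that pass everything by address.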

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

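  // From here on the operation is emitted inline.  The LLVM atomic
  // instructions used below operate on integer types, so the operand
  // pointers are first recast as pointers to an integer of the same width.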
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.
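  // The ordering is only known at run time, so emit one copy of the operation
  // per legal memory_order value and dispatch with a switch; disallowed
  // orderings (e.g. release for a load) and invalid values fall through to
  // the monotonic default block.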

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
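///
/// A sketch of the native (non-libcall) path, assuming no padding: the source
/// address is cast to a pointer to an integer of the atomic width, loaded
/// with a sequentially consistent atomic load, and the integer is converted
/// back to the value type, going through a temporary only when it cannot be
/// rebuilt from the integer directly.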
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(),
                                     getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
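///
/// A sketch of the native (non-libcall) path: the r-value is converted to an
/// integer of the atomic width (directly for suitably sized scalars,
/// otherwise by spilling to a temporary), and then stored with a sequentially
/// consistent atomic store through the destination recast as an integer
/// pointer.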
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

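/// Emit the (non-atomic) initialization of an object of atomic type.  The
/// value is copied into place with any padding zeroed as needed, so that
/// later atomic operations see a deterministic bit pattern.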
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}