//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
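/// AggExprEmitter - This visitor emits the value of an aggregate-typed
/// expression into the memory pointed to by DestPtr.  DestPtr may be null when
/// the caller does not need the result; VolatileDest and IgnoreResult describe
/// how that destination should be treated.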
class VISIBILITY_HIDDEN AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  llvm::Value *DestPtr;
  bool VolatileDest;
  bool IgnoreResult;

public:
  AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder),
      DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then copies the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCStyleCastExpr(CStyleCastExpr *E);
  void VisitImplicitCastExpr(ImplicitCastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
  void VisitObjCKVCRefExpr(ObjCKVCRefExpr *E);

  void VisitConditionalOperator(const ConditionalOperator *CO);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address, QualType T);
  //  case Expr::ChooseExprClass:

};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then copies the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If the result is ignored, don't copy from the value.
  if (DestPtr == 0) {
    if (!Src.isVolatileQualified() || (IgnoreResult && Ignore))
      return;
    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    DestPtr = CGF.CreateTempAlloca(CGF.ConvertType(E->getType()), "agg.tmp");
  }

  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source, as we can't eliminate the copy if either operand is
  // volatile, unless the copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
                        VolatileDest|Src.isVolatileQualified());
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

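// VisitCStyleCastExpr - A C-style cast to union type (a GCC extension)
// initializes the union's first field from the operand; all other aggregate
// casts simply forward to the subexpression.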
void AggExprEmitter::VisitCStyleCastExpr(CStyleCastExpr *E) {
  // GCC union extension
  if (E->getType()->isUnionType()) {
    RecordDecl *SD = E->getType()->getAsRecordType()->getDecl();
    LValue FieldLoc = CGF.EmitLValueForField(DestPtr,
                                             *SD->field_begin(CGF.getContext()),
                                             true, 0);
    EmitInitializationToLValue(E->getSubExpr(), FieldLoc);
    return;
  }

  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
  assert(CGF.getContext().typesAreCompatible(
                          E->getSubExpr()->getType().getUnqualifiedType(),
                          E->getType().getUnqualifiedType()) &&
         "Implicit cast types must be compatible");
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  RValue RV = CGF.EmitCallExpr(E);
  EmitFinalDestCopy(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E);
  EmitFinalDestCopy(E, RV);
}

void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
  RValue RV = CGF.EmitObjCPropertyGet(E);
  EmitFinalDestCopy(E, RV);
}

void AggExprEmitter::VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
  RValue RV = CGF.EmitObjCPropertyGet(E);
  EmitFinalDestCopy(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitAnyExpr(E->getLHS());
  CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest);
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().typesAreCompatible(
             E->getLHS()->getType().getUnqualifiedType(),
             E->getRHS()->getType().getUnqualifiedType())
         && "Invalid assignment");
  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special-case property setters; otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    llvm::Value *AggLoc = DestPtr;
    if (!AggLoc)
      AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
    CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
    CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
                            RValue::getAggregate(AggLoc, VolatileDest));
  } else if (LHS.isKVCRef()) {
    llvm::Value *AggLoc = DestPtr;
    if (!AggLoc)
      AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
    CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
    CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
                            RValue::getAggregate(AggLoc, VolatileDest));
  } else {
    // Codegen the RHS so that it stores directly into the LHS.
    CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified());
    EmitFinalDestCopy(E, LHS, true);
  }
}

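// VisitConditionalOperator - Emit '?:' for an aggregate by branching on the
// condition and emitting whichever arm is selected directly into the
// destination, then rejoining at a common continuation block.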
void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
  Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);

  CGF.EmitBlock(LHSBlock);

  // The GNU extension of eliding the LHS ("x ?: y") is not supported for
  // aggregate values.
  assert(E->getLHS() && "Must have LHS for aggregate value");

  Visit(E->getLHS());
  CGF.EmitBranch(ContBlock);

  CGF.EmitBlock(RHSBlock);

  Visit(E->getRHS());
  CGF.EmitBranch(ContBlock);

  CGF.EmitBlock(ContBlock);
}

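// VisitVAArgExpr - Emit a va_arg of aggregate type.  EmitVAArg returns a null
// pointer when the target cannot handle the type, which we report as
// unsupported.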
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, 0));
}

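// VisitCXXConstructExpr - Emit a constructor call into the destination; if no
// destination was provided, construct into a temporary alloca created for the
// temporary variable declaration.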
void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  llvm::Value *V = DestPtr;

  if (!V) {
    assert(isa<CXXTempVarDecl>(E->getVarDecl()) &&
           "Must have a temp var decl when there's no destination!");

    V = CGF.CreateTempAlloca(CGF.ConvertType(E->getVarDecl()->getType()),
                             "tmpvar");
  }

  CGF.EmitCXXConstructExpr(V, E);
}

void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
  // FIXME: Do something with the temporaries!
  Visit(E->getSubExpr());
}

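// EmitInitializationToLValue - Store the value of the initializer expression E
// into the given lvalue, dispatching on whether E is an implicit zero
// initializer, a complex value, an aggregate, or a scalar.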
void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  // FIXME: Are initializers affected by volatile?
  if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, E->getType());
  } else if (E->getType()->isComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(E->getType())) {
    CGF.EmitAnyExpr(E, LV.getAddress(), false);
  } else {
    CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, E->getType());
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
  if (!CGF.hasAggregateLLVMType(T)) {
    // For non-aggregates, we can store zero.
    llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
    CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
  } else {
    // Otherwise, just memset the whole thing to zero.  This is legal
    // because in LLVM, all default initializers are guaranteed to have a
    // bit pattern of all zeros.
    // FIXME: That isn't true for member pointers!
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitMemSetToZero(LV.getAddress(), T);
  }
}

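// VisitInitListExpr - Emit an initializer list by initializing each array
// element or record field in turn, and zero-initializing whatever the list
// leaves out.  Unions get only their single designated field initialized.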
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Disabled while we figure out what to do about
  // test/CodeGen/bitfield.c
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  // FIXME: Should we really be doing this? Should we try to avoid cases where
  // we emit a global with a lot of zeros?  Should we try to avoid short
  // globals?
  if (E->isConstantInitializer(CGF.getContext(), 0)) {
    llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, &CGF);
    llvm::GlobalVariable* GV =
      new llvm::GlobalVariable(C->getType(), true,
                               llvm::GlobalValue::InternalLinkage,
                               C, "", &CGF.CGM.getModule(), 0);
    EmitFinalDestCopy(E, LValue::MakeAddr(GV, 0));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator()) {
    CGF.ErrorUnsupported(E, "GNU array range designator extension");
  }

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    const llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

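    // If the first initializer already has the array type of the list itself
    // (for example, a string literal initializing a char array), just copy it
    // into the destination.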
    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().getCanonicalType(T1).getUnqualifiedType() ==
          CGF.getContext().getCanonicalType(T2).getUnqualifiedType()) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
    ElementType =
      CGF.getContext().getAsArrayType(ElementType)->getElementType();

    unsigned CVRqualifier = ElementType.getCVRQualifiers();

    for (uint64_t i = 0; i != NumArrayElements; ++i) {
      llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
      if (i < NumInitElements)
        EmitInitializationToLValue(E->getInit(i),
                                   LValue::MakeAddr(NextVal, CVRqualifier));
      else
        EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, CVRqualifier),
                                       ElementType);
    }
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *SD = E->getType()->getAsRecordType()->getDecl();
  unsigned CurInitVal = 0;

  if (E->getType()->isUnionType()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = SD->field_begin(CGF.getContext()),
                                   FieldEnd = SD->field_end(CGF.getContext());
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();
    LValue FieldLoc = CGF.EmitLValueForField(DestPtr, Field, true, 0);

    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (RecordDecl::field_iterator Field = SD->field_begin(CGF.getContext()),
                               FieldEnd = SD->field_end(CGF.getContext());
       Field != FieldEnd; ++Field) {
    // We're done once we hit the flexible array member.
    if (Field->getType()->isIncompleteArrayType())
      break;

    if (Field->isUnnamedBitfield())
      continue;

    // FIXME: volatility
    LValue FieldLoc = CGF.EmitLValueForField(DestPtr, *Field, false, 0);
    if (CurInitVal < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }
  }
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
                                  bool VolatileDest, bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((DestPtr != 0 || VolatileDest == false) &&
         "volatile aggregate can't be 0");

  AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult)
    .Visit(const_cast<Expr*>(E));
}

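/// EmitAggregateClear - Zero-initialize the aggregate at DestPtr by emitting a
/// memset of the whole object.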
void CodeGenFunction::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  EmitMemSetToZero(DestPtr, Ty);
}

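/// EmitAggregateCopy - Copy an aggregate of type Ty from SrcPtr to DestPtr by
/// emitting a call to the llvm.memcpy intrinsic; isVolatile indicates that
/// either the source or the destination is volatile.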
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
  if (SrcPtr->getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // FIXME: Handle variable sized types.
  const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // so we need to use a different call here.  We use isVolatile to indicate
  // when either the source or the destination is volatile.
  Builder.CreateCall4(CGM.getMemCpyFn(),
                      DestPtr, SrcPtr,
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
                      llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                             TypeInfo.second/8));
}