1 //===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Constant Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGObjCRuntime.h"
17 #include "CGRecordLayout.h"
18 #include "clang/AST/APValue.h"
19 #include "clang/AST/ASTContext.h"
20 #include "clang/AST/RecordLayout.h"
21 #include "clang/AST/StmtVisitor.h"
22 #include "clang/Basic/Builtins.h"
23 #include "llvm/Constants.h"
24 #include "llvm/Function.h"
25 #include "llvm/GlobalVariable.h"
26 #include "llvm/Target/TargetData.h"
27 using namespace clang;
28 using namespace CodeGen;
29 
30 //===----------------------------------------------------------------------===//
31 //                            ConstStructBuilder
32 //===----------------------------------------------------------------------===//
33 
34 namespace {
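/// ConstStructBuilder - This class builds an LLVM constant for a struct (or
/// union) initializer list.  It appends each field initializer in turn,
/// packing bitfields a byte at a time, inserting undef padding between fields
/// and at the tail, and falling back to a packed LLVM struct whenever the
/// natural LLVM layout would not match the AST record layout.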
35 class ConstStructBuilder {
36   CodeGenModule &CGM;
37   CodeGenFunction *CGF;
38 
39   bool Packed;
40   unsigned NextFieldOffsetInBytes;
41   unsigned LLVMStructAlignment;
42   std::vector<llvm::Constant *> Elements;
43 public:
44   static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
45                                      InitListExpr *ILE);
46 
47 private:
48   ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
49     : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
50     LLVMStructAlignment(1) { }
51 
52   bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
53                    llvm::Constant *InitExpr);
54 
55   void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
56                       llvm::ConstantInt *InitExpr);
57 
58   void AppendPadding(uint64_t NumBytes);
59 
60   void AppendTailPadding(uint64_t RecordSize);
61 
62   void ConvertStructToPacked();
63 
64   bool Build(InitListExpr *ILE);
65 
66   unsigned getAlignment(const llvm::Constant *C) const {
67     if (Packed)  return 1;
68     return CGM.getTargetData().getABITypeAlignment(C->getType());
69   }
70 
71   uint64_t getSizeInBytes(const llvm::Constant *C) const {
72     return CGM.getTargetData().getTypeAllocSize(C->getType());
73   }
74 };
75 
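/// AppendField - Append the constant initializer for a non-bitfield member.
/// FieldOffset is the field's offset in bits from the start of the record.
/// Padding is inserted (or the struct is converted to a packed layout) as
/// needed to place the constant at that offset.  Returns false if the
/// initializer constant is null, meaning the struct cannot be built.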
76 bool ConstStructBuilder::
77 AppendField(const FieldDecl *Field, uint64_t FieldOffset,
78             llvm::Constant *InitCst) {
79   uint64_t FieldOffsetInBytes = FieldOffset / 8;
80 
81   assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
82          && "Field offset mismatch!");
83 
84   // Emit the field.
85   if (!InitCst)
86     return false;
87 
88   unsigned FieldAlignment = getAlignment(InitCst);
89 
90   // Round up the field offset to the alignment of the field type.
91   uint64_t AlignedNextFieldOffsetInBytes =
92     llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
93 
94   if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
95     assert(!Packed && "Alignment is wrong even with a packed struct!");
96 
97     // Convert the struct to a packed struct.
98     ConvertStructToPacked();
99 
100     AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
101   }
102 
103   if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
104     // We need to append padding.
105     AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
106 
107     assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
108            "Did not add enough padding!");
109 
110     AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
111   }
112 
113   // Add the field.
114   Elements.push_back(InitCst);
115   NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes +
116                              getSizeInBytes(InitCst);
117 
118   if (Packed)
119     assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
120   else
121     LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
122 
123   return true;
124 }
125 
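/// AppendBitField - Append the constant initializer for a bitfield member,
/// packing its value a byte at a time.  Bits that share a byte with the
/// previously emitted element are OR'd into that byte; the remaining bits are
/// emitted as individual i8 constants in the target's endian order.  For
/// example, a 12-bit field that starts on a byte boundary is emitted as one
/// full byte followed by a byte holding the remaining four bits.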
126 void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
127                                         uint64_t FieldOffset,
128                                         llvm::ConstantInt *CI) {
129   if (FieldOffset > NextFieldOffsetInBytes * 8) {
130     // We need to add padding.
131     uint64_t NumBytes =
132       llvm::RoundUpToAlignment(FieldOffset -
133                                NextFieldOffsetInBytes * 8, 8) / 8;
134 
135     AppendPadding(NumBytes);
136   }
137 
138   uint64_t FieldSize =
139     Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
140 
141   llvm::APInt FieldValue = CI->getValue();
142 
143   // Promote the size of FieldValue if necessary
144   // FIXME: This should never occur, but currently it can because initializer
145   // constants are cast to bool, and because clang is not enforcing bitfield
146   // width limits.
147   if (FieldSize > FieldValue.getBitWidth())
148     FieldValue.zext(FieldSize);
149 
150   // Truncate the size of FieldValue to the bit field size.
151   if (FieldSize < FieldValue.getBitWidth())
152     FieldValue.trunc(FieldSize);
153 
154   if (FieldOffset < NextFieldOffsetInBytes * 8) {
155     // Either part of the field or the entire field can go into the previous
156     // byte.
157     assert(!Elements.empty() && "Elements can't be empty!");
158 
159     unsigned BitsInPreviousByte =
160       NextFieldOffsetInBytes * 8 - FieldOffset;
161 
162     bool FitsCompletelyInPreviousByte =
163       BitsInPreviousByte >= FieldValue.getBitWidth();
164 
165     llvm::APInt Tmp = FieldValue;
166 
167     if (!FitsCompletelyInPreviousByte) {
168       unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
169 
170       if (CGM.getTargetData().isBigEndian()) {
171         Tmp = Tmp.lshr(NewFieldWidth);
172         Tmp.trunc(BitsInPreviousByte);
173 
174         // We want the remaining high bits.
175         FieldValue.trunc(NewFieldWidth);
176       } else {
177         Tmp.trunc(BitsInPreviousByte);
178 
179         // We want the remaining low bits.
180         FieldValue = FieldValue.lshr(BitsInPreviousByte);
181         FieldValue.trunc(NewFieldWidth);
182       }
183     }
184 
185     Tmp.zext(8);
186     if (CGM.getTargetData().isBigEndian()) {
187       if (FitsCompletelyInPreviousByte)
188         Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
189     } else {
190       Tmp = Tmp.shl(8 - BitsInPreviousByte);
191     }
192 
193     // 'or' in the bits that go into the previous byte.
194     llvm::Value *LastElt = Elements.back();
195     if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
196       Tmp |= Val->getValue();
197     else {
198       assert(isa<llvm::UndefValue>(LastElt));
      // If there is an undef field that we're adding to, it can either be a
      // scalar undef (in which case we just replace it with our field) or an
      // array.  If it is an array, we have to pull one byte off the array so
      // that the other undef bytes stay around.
203       if (!isa<llvm::IntegerType>(LastElt->getType())) {
        // The undef padding will be a multibyte array; create a new, smaller
        // padding array followed by a hole for our i8 to get plopped into.
206         assert(isa<llvm::ArrayType>(LastElt->getType()) &&
207                "Expected array padding of undefs");
208         const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
209         assert(AT->getElementType()->isIntegerTy(8) &&
210                AT->getNumElements() != 0 &&
211                "Expected non-empty array padding of undefs");
212 
213         // Remove the padding array.
214         NextFieldOffsetInBytes -= AT->getNumElements();
215         Elements.pop_back();
216 
217         // Add the padding back in two chunks.
218         AppendPadding(AT->getNumElements()-1);
219         AppendPadding(1);
220         assert(isa<llvm::UndefValue>(Elements.back()) &&
221                Elements.back()->getType()->isIntegerTy(8) &&
222                "Padding addition didn't work right");
223       }
224     }
225 
226     Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
227 
228     if (FitsCompletelyInPreviousByte)
229       return;
230   }
231 
232   while (FieldValue.getBitWidth() > 8) {
233     llvm::APInt Tmp;
234 
235     if (CGM.getTargetData().isBigEndian()) {
236       // We want the high bits.
237       Tmp = FieldValue;
238       Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
239       Tmp.trunc(8);
240     } else {
241       // We want the low bits.
242       Tmp = FieldValue;
243       Tmp.trunc(8);
244 
245       FieldValue = FieldValue.lshr(8);
246     }
247 
248     Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
249     NextFieldOffsetInBytes++;
250 
251     FieldValue.trunc(FieldValue.getBitWidth() - 8);
252   }
253 
254   assert(FieldValue.getBitWidth() > 0 &&
255          "Should have at least one bit left!");
256   assert(FieldValue.getBitWidth() <= 8 &&
257          "Should not have more than a byte left!");
258 
259   if (FieldValue.getBitWidth() < 8) {
260     if (CGM.getTargetData().isBigEndian()) {
261       unsigned BitWidth = FieldValue.getBitWidth();
262 
263       FieldValue.zext(8);
264       FieldValue = FieldValue << (8 - BitWidth);
265     } else
266       FieldValue.zext(8);
267   }
268 
269   // Append the last element.
270   Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
271                                             FieldValue));
272   NextFieldOffsetInBytes++;
273 }
274 
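/// AppendPadding - Append NumBytes of undef padding, emitted either as a
/// single i8 or as an [N x i8] array, and advance the current field offset.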
275 void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
276   if (!NumBytes)
277     return;
278 
279   const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
280   if (NumBytes > 1)
281     Ty = llvm::ArrayType::get(Ty, NumBytes);
282 
283   llvm::Constant *C = llvm::UndefValue::get(Ty);
284   Elements.push_back(C);
285   assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
286 
287   NextFieldOffsetInBytes += getSizeInBytes(C);
288 }
289 
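/// AppendTailPadding - Append enough undef padding to bring the struct up to
/// RecordSize (given in bits), so the constant matches the AST layout size.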
290 void ConstStructBuilder::AppendTailPadding(uint64_t RecordSize) {
291   assert(RecordSize % 8 == 0 && "Invalid record size!");
292 
293   uint64_t RecordSizeInBytes = RecordSize / 8;
294   assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
295 
296   unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
297   AppendPadding(NumPadBytes);
298 }
299 
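/// ConvertStructToPacked - Rewrite the elements collected so far with explicit
/// undef padding so that the constant can be emitted as a packed (1-byte
/// aligned) LLVM struct without changing any element offsets.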
300 void ConstStructBuilder::ConvertStructToPacked() {
301   std::vector<llvm::Constant *> PackedElements;
302   uint64_t ElementOffsetInBytes = 0;
303 
304   for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
305     llvm::Constant *C = Elements[i];
306 
307     unsigned ElementAlign =
308       CGM.getTargetData().getABITypeAlignment(C->getType());
309     uint64_t AlignedElementOffsetInBytes =
310       llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
311 
312     if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
313       // We need some padding.
314       uint64_t NumBytes =
315         AlignedElementOffsetInBytes - ElementOffsetInBytes;
316 
317       const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
318       if (NumBytes > 1)
319         Ty = llvm::ArrayType::get(Ty, NumBytes);
320 
321       llvm::Constant *Padding = llvm::UndefValue::get(Ty);
322       PackedElements.push_back(Padding);
323       ElementOffsetInBytes += getSizeInBytes(Padding);
324     }
325 
326     PackedElements.push_back(C);
327     ElementOffsetInBytes += getSizeInBytes(C);
328   }
329 
330   assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
331          "Packing the struct changed its size!");
332 
333   Elements = PackedElements;
334   LLVMStructAlignment = 1;
335   Packed = true;
336 }
337 
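/// Build - Emit an initializer for each field of the record (or for the
/// single initialized field of a union), using explicit null constants for
/// fields without initializers, and then append any required tail padding.
/// Returns false if some field initializer could not be emitted as a
/// constant.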
338 bool ConstStructBuilder::Build(InitListExpr *ILE) {
339   RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
340   const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
341 
342   unsigned FieldNo = 0;
343   unsigned ElementNo = 0;
344   for (RecordDecl::field_iterator Field = RD->field_begin(),
345        FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
346 
347     // If this is a union, skip all the fields that aren't being initialized.
348     if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
349       continue;
350 
    // Don't emit anonymous bitfields; they only affect layout.
352     if (Field->isBitField() && !Field->getIdentifier())
353       continue;
354 
    // Get the initializer.  A struct can include fields without initializers;
    // we just use explicit null values for them.
357     llvm::Constant *EltInit;
358     if (ElementNo < ILE->getNumInits())
359       EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
360                                      Field->getType(), CGF);
361     else
362       EltInit = CGM.EmitNullConstant(Field->getType());
363 
364     if (!Field->isBitField()) {
365       // Handle non-bitfield members.
366       if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
367         return false;
368     } else {
369       // Otherwise we have a bitfield.
370       AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
371                      cast<llvm::ConstantInt>(EltInit));
372     }
373   }
374 
375   uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
376 
377   if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
378     // If the struct is bigger than the size of the record type,
379     // we must have a flexible array member at the end.
380     assert(RD->hasFlexibleArrayMember() &&
381            "Must have flexible array member if struct is bigger than type!");
382 
383     // No tail padding is necessary.
384     return true;
385   }
386 
387   uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
388                                                       LLVMStructAlignment);
389 
390   // Check if we need to convert the struct to a packed struct.
391   if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
392       LLVMSizeInBytes > LayoutSizeInBytes) {
393     assert(!Packed && "Size mismatch!");
394 
395     ConvertStructToPacked();
396     assert(NextFieldOffsetInBytes <= LayoutSizeInBytes &&
397            "Converting to packed did not help!");
398   }
399 
400   // Append tail padding if necessary.
401   AppendTailPadding(Layout.getSize());
402 
403   assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
404          "Tail padding mismatch!");
405 
406   return true;
407 }
408 
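/// BuildStruct - The entry point: build a constant struct for the given
/// initializer list, returning null if it cannot be emitted as a constant.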
409 llvm::Constant *ConstStructBuilder::
410   BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
411   ConstStructBuilder Builder(CGM, CGF);
412 
413   if (!Builder.Build(ILE))
414     return 0;
415 
  llvm::Constant *Result =
    llvm::ConstantStruct::get(CGM.getLLVMContext(),
                              Builder.Elements, Builder.Packed);
419 
420   assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
421                                   Builder.getAlignment(Result)) ==
422          Builder.getSizeInBytes(Result) && "Size mismatch!");
423 
424   return Result;
425 }
426 
427 
428 //===----------------------------------------------------------------------===//
429 //                             ConstExprEmitter
430 //===----------------------------------------------------------------------===//
431 
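/// ConstExprEmitter - This visitor emits expressions directly as LLVM
/// constants: initializer lists, string literals, compound literals,
/// address-of expressions, member pointer constants, and so on.  Each Visit
/// method returns null when its expression cannot be emitted as a constant.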
432 class ConstExprEmitter :
433   public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
434   CodeGenModule &CGM;
435   CodeGenFunction *CGF;
436   llvm::LLVMContext &VMContext;
437 public:
438   ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
439     : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
440   }
441 
442   //===--------------------------------------------------------------------===//
443   //                            Visitor Methods
444   //===--------------------------------------------------------------------===//
445 
446   llvm::Constant *VisitStmt(Stmt *S) {
447     return 0;
448   }
449 
450   llvm::Constant *VisitParenExpr(ParenExpr *PE) {
451     return Visit(PE->getSubExpr());
452   }
453 
454   llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
455     return Visit(E->getInitializer());
456   }
457 
458   llvm::Constant *EmitMemberFunctionPointer(CXXMethodDecl *MD) {
459     assert(MD->isInstance() && "Member function must not be static!");
460 
461     MD = MD->getCanonicalDecl();
462 
463     const llvm::Type *PtrDiffTy =
464       CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
465 
466     llvm::Constant *Values[2];
467 
468     Values[0] = CGM.GetCXXMemberFunctionPointerValue(MD);
469 
470     // The adjustment will always be 0.
471     Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0);
472 
473     return llvm::ConstantStruct::get(CGM.getLLVMContext(),
474                                      Values, 2, /*Packed=*/false);
475   }
476 
477   llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
478     if (const MemberPointerType *MPT =
479         E->getType()->getAs<MemberPointerType>()) {
480       QualType T = MPT->getPointeeType();
481       DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
482 
483       NamedDecl *ND = DRE->getDecl();
484       if (T->isFunctionProtoType())
485         return EmitMemberFunctionPointer(cast<CXXMethodDecl>(ND));
486 
487       // We have a pointer to data member.
488       return CGM.EmitPointerToDataMember(cast<FieldDecl>(ND));
489     }
490 
491     return 0;
492   }
493 
494   llvm::Constant *VisitBinSub(BinaryOperator *E) {
    // This must be a pointer/pointer subtraction.  This only happens for
    // address-of-label expressions.
497     if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
498        !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
499       return 0;
500 
501     llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
502                                                E->getLHS()->getType(), CGF);
503     llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
504                                                E->getRHS()->getType(), CGF);
505 
506     const llvm::Type *ResultType = ConvertType(E->getType());
507     LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
508     RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
509 
    // No need to divide by the element size, since the address of a label is
    // always void*, which has size 1 as a GNU extension.
512     return llvm::ConstantExpr::getSub(LHS, RHS);
513   }
514 
515   llvm::Constant *VisitCastExpr(CastExpr* E) {
516     switch (E->getCastKind()) {
517     case CastExpr::CK_ToUnion: {
518       // GCC cast to union extension
519       assert(E->getType()->isUnionType() &&
520              "Destination type is not union type!");
521       const llvm::Type *Ty = ConvertType(E->getType());
522       Expr *SubExpr = E->getSubExpr();
523 
524       llvm::Constant *C =
525         CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
526       if (!C)
527         return 0;
528 
      // Build a struct with the union sub-element as the first member,
      // padded to the appropriate size.
531       std::vector<llvm::Constant*> Elts;
532       std::vector<const llvm::Type*> Types;
533       Elts.push_back(C);
534       Types.push_back(C->getType());
535       unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
536       unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
537 
538       assert(CurSize <= TotalSize && "Union size mismatch!");
539       if (unsigned NumPadBytes = TotalSize - CurSize) {
540         const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
541         if (NumPadBytes > 1)
542           Ty = llvm::ArrayType::get(Ty, NumPadBytes);
543 
544         Elts.push_back(llvm::UndefValue::get(Ty));
545         Types.push_back(Ty);
546       }
547 
548       llvm::StructType* STy =
549         llvm::StructType::get(C->getType()->getContext(), Types, false);
550       return llvm::ConstantStruct::get(STy, Elts);
551     }
552     case CastExpr::CK_NullToMemberPointer:
553       return CGM.EmitNullConstant(E->getType());
554 
555     case CastExpr::CK_BaseToDerivedMemberPointer: {
556       Expr *SubExpr = E->getSubExpr();
557 
558       const MemberPointerType *SrcTy =
559         SubExpr->getType()->getAs<MemberPointerType>();
560       const MemberPointerType *DestTy =
561         E->getType()->getAs<MemberPointerType>();
562 
563       const CXXRecordDecl *DerivedClass =
564         cast<CXXRecordDecl>(cast<RecordType>(DestTy->getClass())->getDecl());
565 
566       if (SrcTy->getPointeeType()->isFunctionProtoType()) {
567         llvm::Constant *C =
568           CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
569         if (!C)
570           return 0;
571 
572         llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
573 
574         // Check if we need to update the adjustment.
575         if (llvm::Constant *Offset =
576             CGM.GetNonVirtualBaseClassOffset(DerivedClass, E->getBasePath())) {
577           llvm::Constant *Values[2];
578 
579           Values[0] = CS->getOperand(0);
580           Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
581           return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
582                                            /*Packed=*/false);
583         }
584 
585         return CS;
586       }
587     }
588 
589     case CastExpr::CK_BitCast:
590       // This must be a member function pointer cast.
591       return Visit(E->getSubExpr());
592 
593     default: {
594       // FIXME: This should be handled by the CK_NoOp cast kind.
595       // Explicit and implicit no-op casts
596       QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
597       if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
598         return Visit(E->getSubExpr());
599 
600       // Handle integer->integer casts for address-of-label differences.
601       if (Ty->isIntegerType() && SubTy->isIntegerType() &&
602           CGF) {
603         llvm::Value *Src = Visit(E->getSubExpr());
604         if (Src == 0) return 0;
605 
606         // Use EmitScalarConversion to perform the conversion.
607         return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
608       }
609 
610       return 0;
611     }
612     }
613   }
614 
615   llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
616     return Visit(DAE->getExpr());
617   }
618 
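  // EmitArrayInitialization - Build a constant array from the initializer
  // list, null-initializing any trailing elements that have no explicit
  // initializer.  If some element constants end up with a type other than the
  // array's element type, the result is emitted as a packed struct instead.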
619   llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
620     unsigned NumInitElements = ILE->getNumInits();
621     if (NumInitElements == 1 &&
622         (isa<StringLiteral>(ILE->getInit(0)) ||
623          isa<ObjCEncodeExpr>(ILE->getInit(0))))
624       return Visit(ILE->getInit(0));
625 
626     std::vector<llvm::Constant*> Elts;
627     const llvm::ArrayType *AType =
628         cast<llvm::ArrayType>(ConvertType(ILE->getType()));
629     const llvm::Type *ElemTy = AType->getElementType();
630     unsigned NumElements = AType->getNumElements();
631 
    // Initializing an array requires us to automatically initialize any
    // elements that have not been initialized explicitly.
634     unsigned NumInitableElts = std::min(NumInitElements, NumElements);
635 
636     // Copy initializer elements.
637     unsigned i = 0;
638     bool RewriteType = false;
639     for (; i < NumInitableElts; ++i) {
640       Expr *Init = ILE->getInit(i);
641       llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
642       if (!C)
643         return 0;
644       RewriteType |= (C->getType() != ElemTy);
645       Elts.push_back(C);
646     }
647 
648     // Initialize remaining array elements.
649     // FIXME: This doesn't handle member pointers correctly!
650     for (; i < NumElements; ++i)
651       Elts.push_back(llvm::Constant::getNullValue(ElemTy));
652 
653     if (RewriteType) {
654       // FIXME: Try to avoid packing the array
655       std::vector<const llvm::Type*> Types;
656       for (unsigned i = 0; i < Elts.size(); ++i)
657         Types.push_back(Elts[i]->getType());
658       const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
659                                                             Types, true);
660       return llvm::ConstantStruct::get(SType, Elts);
661     }
662 
663     return llvm::ConstantArray::get(AType, Elts);
664   }
665 
666   llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
667     return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
668   }
669 
670   llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
671     return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
672   }
673 
674   llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
675     return CGM.EmitNullConstant(E->getType());
676   }
677 
678   llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
679     if (ILE->getType()->isScalarType()) {
680       // We have a scalar in braces. Just use the first element.
681       if (ILE->getNumInits() > 0) {
682         Expr *Init = ILE->getInit(0);
683         return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
684       }
685       return CGM.EmitNullConstant(ILE->getType());
686     }
687 
688     if (ILE->getType()->isArrayType())
689       return EmitArrayInitialization(ILE);
690 
691     if (ILE->getType()->isRecordType())
692       return EmitStructInitialization(ILE);
693 
694     if (ILE->getType()->isUnionType())
695       return EmitUnionInitialization(ILE);
696 
697     // If ILE was a constant vector, we would have handled it already.
698     if (ILE->getType()->isVectorType())
699       return 0;
700 
701     assert(0 && "Unable to handle InitListExpr");
    // Silence the "control reaches end of non-void function" warning.
    // Not reached.
704     return 0;
705   }
706 
707   llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
708     if (!E->getConstructor()->isTrivial())
709       return 0;
710 
711     QualType Ty = E->getType();
712 
713     // FIXME: We should not have to call getBaseElementType here.
714     const RecordType *RT =
715       CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
716     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
717 
718     // If the class doesn't have a trivial destructor, we can't emit it as a
719     // constant expr.
720     if (!RD->hasTrivialDestructor())
721       return 0;
722 
    // Only copy and default constructors can be trivial.

726     if (E->getNumArgs()) {
727       assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
728       assert(E->getConstructor()->isCopyConstructor() &&
729              "trivial ctor has argument but isn't a copy ctor");
730 
731       Expr *Arg = E->getArg(0);
732       assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
733              "argument to copy ctor is of wrong type");
734 
735       return Visit(Arg);
736     }
737 
738     return CGM.EmitNullConstant(Ty);
739   }
740 
741   llvm::Constant *VisitStringLiteral(StringLiteral *E) {
742     assert(!E->getType()->isPointerType() && "Strings are always arrays");
743 
744     // This must be a string initializing an array in a static initializer.
745     // Don't emit it as the address of the string, emit the string data itself
746     // as an inline array.
747     return llvm::ConstantArray::get(VMContext,
748                                     CGM.GetStringForStringLiteral(E), false);
749   }
750 
751   llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
752     // This must be an @encode initializing an array in a static initializer.
753     // Don't emit it as the address of the string, emit the string data itself
754     // as an inline array.
755     std::string Str;
756     CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
757     const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
758 
759     // Resize the string to the right size, adding zeros at the end, or
760     // truncating as needed.
761     Str.resize(CAT->getSize().getZExtValue(), '\0');
762     return llvm::ConstantArray::get(VMContext, Str, false);
763   }
764 
765   llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
766     return Visit(E->getSubExpr());
767   }
768 
769   // Utility methods
770   const llvm::Type *ConvertType(QualType T) {
771     return CGM.getTypes().ConvertType(T);
772   }
773 
774 public:
775   llvm::Constant *EmitLValue(Expr *E) {
776     switch (E->getStmtClass()) {
777     default: break;
778     case Expr::CompoundLiteralExprClass: {
779       // Note that due to the nature of compound literals, this is guaranteed
780       // to be the only use of the variable, so we just generate it here.
781       CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
782       llvm::Constant* C = Visit(CLE->getInitializer());
783       // FIXME: "Leaked" on failure.
784       if (C)
785         C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
786                                      E->getType().isConstant(CGM.getContext()),
787                                      llvm::GlobalValue::InternalLinkage,
788                                      C, ".compoundliteral", 0, false,
789                                      E->getType().getAddressSpace());
790       return C;
791     }
792     case Expr::DeclRefExprClass: {
793       ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
794       if (Decl->hasAttr<WeakRefAttr>())
        return CGM.GetWeakRefReference(Decl);
796       if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
797         return CGM.GetAddrOfFunction(FD);
798       if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
799         // We can never refer to a variable with local storage.
800         if (!VD->hasLocalStorage()) {
801           if (VD->isFileVarDecl() || VD->hasExternalStorage())
802             return CGM.GetAddrOfGlobalVar(VD);
803           else if (VD->isBlockVarDecl()) {
804             assert(CGF && "Can't access static local vars without CGF");
805             return CGF->GetAddrOfStaticLocalVar(VD);
806           }
807         }
808       }
809       break;
810     }
811     case Expr::StringLiteralClass:
812       return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
813     case Expr::ObjCEncodeExprClass:
814       return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
815     case Expr::ObjCStringLiteralClass: {
816       ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
817       llvm::Constant *C =
818           CGM.getObjCRuntime().GenerateConstantString(SL->getString());
819       return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
820     }
821     case Expr::PredefinedExprClass: {
822       unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
823       if (CGF) {
824         LValue Res = CGF->EmitPredefinedFunctionName(Type);
825         return cast<llvm::Constant>(Res.getAddress());
826       } else if (Type == PredefinedExpr::PrettyFunction) {
827         return CGM.GetAddrOfConstantCString("top level", ".tmp");
828       }
829 
830       return CGM.GetAddrOfConstantCString("", ".tmp");
831     }
832     case Expr::AddrLabelExprClass: {
833       assert(CGF && "Invalid address of label expression outside function.");
834       llvm::Constant *Ptr =
835         CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
836       return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
837     }
838     case Expr::CallExprClass: {
839       CallExpr* CE = cast<CallExpr>(E);
840       unsigned builtin = CE->isBuiltinCall(CGM.getContext());
841       if (builtin !=
842             Builtin::BI__builtin___CFStringMakeConstantString &&
843           builtin !=
844             Builtin::BI__builtin___NSStringMakeConstantString)
845         break;
846       const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
847       const StringLiteral *Literal = cast<StringLiteral>(Arg);
848       if (builtin ==
849             Builtin::BI__builtin___NSStringMakeConstantString) {
850         return CGM.getObjCRuntime().GenerateConstantString(Literal);
851       }
852       // FIXME: need to deal with UCN conversion issues.
853       return CGM.GetAddrOfConstantCFString(Literal);
854     }
855     case Expr::BlockExprClass: {
856       std::string FunctionName;
857       if (CGF)
858         FunctionName = CGF->CurFn->getName();
859       else
860         FunctionName = "global";
861 
862       return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
863     }
864     }
865 
866     return 0;
867   }
868 };
869 
870 }  // end anonymous namespace.
871 
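/// EmitConstantExpr - Try to emit the given expression as an LLVM constant.
/// The expression is first run through the AST constant evaluator; if that
/// fails or reports side effects, ConstExprEmitter is used to fold the tree
/// directly.  Returns null if the expression cannot be emitted as a constant.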
872 llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
873                                                 QualType DestType,
874                                                 CodeGenFunction *CGF) {
875   Expr::EvalResult Result;
876 
877   bool Success = false;
878 
879   if (DestType->isReferenceType())
880     Success = E->EvaluateAsLValue(Result, Context);
881   else
882     Success = E->Evaluate(Result, Context);
883 
884   if (Success && !Result.HasSideEffects) {
885     switch (Result.Val.getKind()) {
886     case APValue::Uninitialized:
887       assert(0 && "Constant expressions should be initialized.");
888       return 0;
889     case APValue::LValue: {
890       const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
891       llvm::Constant *Offset =
892         llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
893                                Result.Val.getLValueOffset().getQuantity());
894 
895       llvm::Constant *C;
896       if (const Expr *LVBase = Result.Val.getLValueBase()) {
897         C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
898 
899         // Apply offset if necessary.
900         if (!Offset->isNullValue()) {
901           const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
902           llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
903           Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
904           C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
905         }
906 
907         // Convert to the appropriate type; this could be an lvalue for
908         // an integer.
909         if (isa<llvm::PointerType>(DestTy))
910           return llvm::ConstantExpr::getBitCast(C, DestTy);
911 
912         return llvm::ConstantExpr::getPtrToInt(C, DestTy);
913       } else {
914         C = Offset;
915 
916         // Convert to the appropriate type; this could be an lvalue for
917         // an integer.
918         if (isa<llvm::PointerType>(DestTy))
919           return llvm::ConstantExpr::getIntToPtr(C, DestTy);
920 
        // If the types don't match, this should only be a truncate.
922         if (C->getType() != DestTy)
923           return llvm::ConstantExpr::getTrunc(C, DestTy);
924 
925         return C;
926       }
927     }
928     case APValue::Int: {
929       llvm::Constant *C = llvm::ConstantInt::get(VMContext,
930                                                  Result.Val.getInt());
931 
932       if (C->getType()->isIntegerTy(1)) {
933         const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
934         C = llvm::ConstantExpr::getZExt(C, BoolTy);
935       }
936       return C;
937     }
938     case APValue::ComplexInt: {
939       llvm::Constant *Complex[2];
940 
941       Complex[0] = llvm::ConstantInt::get(VMContext,
942                                           Result.Val.getComplexIntReal());
943       Complex[1] = llvm::ConstantInt::get(VMContext,
944                                           Result.Val.getComplexIntImag());
945 
946       // FIXME: the target may want to specify that this is packed.
947       return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
948     }
949     case APValue::Float:
950       return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
951     case APValue::ComplexFloat: {
952       llvm::Constant *Complex[2];
953 
954       Complex[0] = llvm::ConstantFP::get(VMContext,
955                                          Result.Val.getComplexFloatReal());
956       Complex[1] = llvm::ConstantFP::get(VMContext,
957                                          Result.Val.getComplexFloatImag());
958 
959       // FIXME: the target may want to specify that this is packed.
960       return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
961     }
962     case APValue::Vector: {
963       llvm::SmallVector<llvm::Constant *, 4> Inits;
964       unsigned NumElts = Result.Val.getVectorLength();
965 
966       for (unsigned i = 0; i != NumElts; ++i) {
967         APValue &Elt = Result.Val.getVectorElt(i);
968         if (Elt.isInt())
969           Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
970         else
971           Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
972       }
973       return llvm::ConstantVector::get(&Inits[0], Inits.size());
974     }
975     }
976   }
977 
978   llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
979   if (C && C->getType()->isIntegerTy(1)) {
980     const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
981     C = llvm::ConstantExpr::getZExt(C, BoolTy);
982   }
983   return C;
984 }
985 
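/// FillInNullDataMemberPointers - Starting at the given bit offset into the
/// byte array Elements, fill in the bytes that correspond to pointers to data
/// members with -1 (the Itanium ABI null value), recursing through arrays,
/// bases, and fields of record types.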
986 static void
987 FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
988                              std::vector<llvm::Constant *> &Elements,
989                              uint64_t StartOffset) {
990   assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");
991 
992   if (!CGM.getTypes().ContainsPointerToDataMember(T))
993     return;
994 
995   if (const ConstantArrayType *CAT =
996         CGM.getContext().getAsConstantArrayType(T)) {
997     QualType ElementTy = CAT->getElementType();
998     uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
999 
1000     for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
1001       FillInNullDataMemberPointers(CGM, ElementTy, Elements,
1002                                    StartOffset + I * ElementSize);
1003     }
1004   } else if (const RecordType *RT = T->getAs<RecordType>()) {
1005     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1006     const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
1007 
    // Go through all bases and fill in any null pointers to data members.
1009     for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
1010          E = RD->bases_end(); I != E; ++I) {
1011       if (I->isVirtual()) {
        // FIXME: We should initialize null pointers to data members in
        // virtual bases here.
1014         continue;
1015       }
1016 
      const CXXRecordDecl *BaseDecl =
        cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
1019 
1020       // Ignore empty bases.
1021       if (BaseDecl->isEmpty())
1022         continue;
1023 
1024       // Ignore bases that don't have any pointer to data members.
1025       if (!CGM.getTypes().ContainsPointerToDataMember(BaseDecl))
1026         continue;
1027 
1028       uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
1029       FillInNullDataMemberPointers(CGM, I->getType(),
1030                                    Elements, StartOffset + BaseOffset);
1031     }
1032 
1033     // Visit all fields.
1034     unsigned FieldNo = 0;
1035     for (RecordDecl::field_iterator I = RD->field_begin(),
1036          E = RD->field_end(); I != E; ++I, ++FieldNo) {
1037       QualType FieldType = I->getType();
1038 
1039       if (!CGM.getTypes().ContainsPointerToDataMember(FieldType))
1040         continue;
1041 
1042       uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
1043       FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
1044     }
1045   } else {
1046     assert(T->isMemberPointerType() && "Should only see member pointers here!");
1047     assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
1048            "Should only see pointers to data members here!");
1049 
1050     uint64_t StartIndex = StartOffset / 8;
1051     uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;
1052 
1053     llvm::Constant *NegativeOne =
1054       llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
1055                              -1ULL, /*isSigned=*/true);
1056 
1057     // Fill in the null data member pointer.
1058     for (uint64_t I = StartIndex; I != EndIndex; ++I)
1059       Elements[I] = NegativeOne;
1060   }
1061 }
1062 
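/// EmitNullConstant - Return the constant that represents a zero-initialized
/// object of the given type.  This is an ordinary zero initializer unless the
/// type contains pointers to data members, which the Itanium C++ ABI requires
/// to be initialized to -1 rather than 0.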
1063 llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
1064   if (!getTypes().ContainsPointerToDataMember(T))
1065     return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
1066 
1067   if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
1068 
1069     QualType ElementTy = CAT->getElementType();
1070 
1071     llvm::Constant *Element = EmitNullConstant(ElementTy);
1072     unsigned NumElements = CAT->getSize().getZExtValue();
1073     std::vector<llvm::Constant *> Array(NumElements);
1074     for (unsigned i = 0; i != NumElements; ++i)
1075       Array[i] = Element;
1076 
1077     const llvm::ArrayType *ATy =
1078       cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
1079     return llvm::ConstantArray::get(ATy, Array);
1080   }
1081 
1082   if (const RecordType *RT = T->getAs<RecordType>()) {
1083     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1084     const llvm::StructType *STy =
1085       cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
1086     unsigned NumElements = STy->getNumElements();
1087     std::vector<llvm::Constant *> Elements(NumElements);
1088 
1089     const CGRecordLayout &Layout = getTypes().getCGRecordLayout(RD);
1090 
    // Go through all bases and fill in any null pointers to data members.
1092     for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
1093          E = RD->bases_end(); I != E; ++I) {
1094       if (I->isVirtual()) {
        // FIXME: We should initialize null pointers to data members in
        // virtual bases here.
1097         continue;
1098       }
1099 
1100       const CXXRecordDecl *BaseDecl =
1101         cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
1102 
1103       // Ignore empty bases.
1104       if (BaseDecl->isEmpty())
1105         continue;
1106 
1107       // Ignore bases that don't have any pointer to data members.
1108       if (!getTypes().ContainsPointerToDataMember(BaseDecl))
1109         continue;
1110 
1111       // Currently, all bases are arrays of i8. Figure out how many elements
1112       // this base array has.
1113       unsigned BaseFieldNo = Layout.getNonVirtualBaseLLVMFieldNo(BaseDecl);
1114       const llvm::ArrayType *BaseArrayTy =
1115         cast<llvm::ArrayType>(STy->getElementType(BaseFieldNo));
1116 
1117       unsigned NumBaseElements = BaseArrayTy->getNumElements();
1118       std::vector<llvm::Constant *> BaseElements(NumBaseElements);
1119 
1120       // Now fill in null data member pointers.
1121       FillInNullDataMemberPointers(*this, I->getType(), BaseElements, 0);
1122 
1123       // Now go through all other elements and zero them out.
1124       if (NumBaseElements) {
1125         llvm::Constant *Zero =
1126           llvm::ConstantInt::get(llvm::Type::getInt8Ty(getLLVMContext()), 0);
1127 
1128         for (unsigned I = 0; I != NumBaseElements; ++I) {
1129           if (!BaseElements[I])
1130             BaseElements[I] = Zero;
1131         }
1132       }
1133 
1134       Elements[BaseFieldNo] = llvm::ConstantArray::get(BaseArrayTy,
1135                                                        BaseElements);
1136     }
1137 
1138     for (RecordDecl::field_iterator I = RD->field_begin(),
1139          E = RD->field_end(); I != E; ++I) {
1140       const FieldDecl *FD = *I;
1141 
1142       // Ignore bit fields.
1143       if (FD->isBitField())
1144         continue;
1145 
1146       unsigned FieldNo = Layout.getLLVMFieldNo(FD);
1147       Elements[FieldNo] = EmitNullConstant(FD->getType());
1148     }
1149 
1150     // Now go through all other fields and zero them out.
1151     for (unsigned i = 0; i != NumElements; ++i) {
1152       if (!Elements[i])
1153         Elements[i] = llvm::Constant::getNullValue(STy->getElementType(i));
1154     }
1155 
1156     return llvm::ConstantStruct::get(STy, Elements);
1157   }
1158 
1159   assert(T->isMemberPointerType() && "Should only see member pointers here!");
1160   assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
1161          "Should only see pointers to data members here!");
1162 
1163   // Itanium C++ ABI 2.3:
1164   //   A NULL pointer is represented as -1.
1165   return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1ULL,
1166                                 /*isSigned=*/true);
1167 }
1168 
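/// EmitPointerToDataMember - Emit the constant value of a pointer to the given
/// field: its byte offset from the start of the containing class, as a
/// ptrdiff_t.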
1169 llvm::Constant *
1170 CodeGenModule::EmitPointerToDataMember(const FieldDecl *FD) {
1171 
1172   // Itanium C++ ABI 2.3:
1173   //   A pointer to data member is an offset from the base address of the class
  //   object containing it, represented as a ptrdiff_t.
1175 
1176   const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(FD->getParent());
1177   QualType ClassType =
1178     getContext().getTypeDeclType(const_cast<CXXRecordDecl *>(ClassDecl));
1179 
1180   const llvm::StructType *ClassLTy =
1181     cast<llvm::StructType>(getTypes().ConvertType(ClassType));
1182 
1183   const CGRecordLayout &RL =
1184     getTypes().getCGRecordLayout(FD->getParent());
1185   unsigned FieldNo = RL.getLLVMFieldNo(FD);
1186   uint64_t Offset =
1187     getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);
1188 
1189   const llvm::Type *PtrDiffTy =
1190     getTypes().ConvertType(getContext().getPointerDiffType());
1191 
1192   return llvm::ConstantInt::get(PtrDiffTy, Offset);
1193 }
1194