1 //===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/Frontend/CodeGenOptions.h"
15 #include "CodeGenFunction.h"
16 #include "CGCXXABI.h"
17 #include "CGObjCRuntime.h"
18 #include "CodeGenModule.h"
19 #include "CGDebugInfo.h"
20 #include "clang/AST/ASTContext.h"
21 #include "clang/AST/DeclObjC.h"
22 #include "clang/AST/RecordLayout.h"
23 #include "clang/AST/StmtVisitor.h"
24 #include "clang/Basic/TargetInfo.h"
25 #include "llvm/Constants.h"
26 #include "llvm/Function.h"
27 #include "llvm/GlobalVariable.h"
28 #include "llvm/Intrinsics.h"
29 #include "llvm/Module.h"
30 #include "llvm/Support/CFG.h"
31 #include "llvm/Target/TargetData.h"
32 #include <cstdarg>
33 
34 using namespace clang;
35 using namespace CodeGen;
36 using llvm::Value;
37 
38 //===----------------------------------------------------------------------===//
39 //                         Scalar Expression Emitter
40 //===----------------------------------------------------------------------===//
41 
42 namespace {
43 struct BinOpInfo {
44   Value *LHS;
45   Value *RHS;
46   QualType Ty;  // Computation Type.
47   BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  const Expr *E;      // Entire expr, used when reporting unsupported cases.  May not be a binop.
49 };
50 
51 static bool MustVisitNullValue(const Expr *E) {
52   // If a null pointer expression's type is the C++0x nullptr_t, then
53   // it's not necessarily a simple constant and it must be evaluated
54   // for its potential side effects.
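  // For example, if a function returning nullptr_t is called and the result
  // is converted to a pointer, the call itself must still be emitted.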
55   return E->getType()->isNullPtrType();
56 }
57 
58 class ScalarExprEmitter
59   : public StmtVisitor<ScalarExprEmitter, Value*> {
60   CodeGenFunction &CGF;
61   CGBuilderTy &Builder;
62   bool IgnoreResultAssign;
63   llvm::LLVMContext &VMContext;
64 public:
65 
66   ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
67     : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
68       VMContext(cgf.getLLVMContext()) {
69   }
70 
71   //===--------------------------------------------------------------------===//
72   //                               Utilities
73   //===--------------------------------------------------------------------===//
74 
75   bool TestAndClearIgnoreResultAssign() {
76     bool I = IgnoreResultAssign;
77     IgnoreResultAssign = false;
78     return I;
79   }
80 
81   const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
82   LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
83   LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
84 
85   Value *EmitLoadOfLValue(LValue LV, QualType T) {
86     return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
87   }
88 
  /// EmitLoadOfLValue - Given an expression that represents a value l-value,
  /// this method emits the address of the l-value, then loads and returns the
  /// result.
92   Value *EmitLoadOfLValue(const Expr *E) {
93     return EmitLoadOfLValue(EmitCheckedLValue(E), E->getType());
94   }
95 
96   /// EmitConversionToBool - Convert the specified expression value to a
97   /// boolean (i1) truth value.  This is equivalent to "Val != 0".
98   Value *EmitConversionToBool(Value *Src, QualType DstTy);
99 
100   /// EmitScalarConversion - Emit a conversion from the specified type to the
101   /// specified destination type, both of which are LLVM scalar types.
102   Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
103 
104   /// EmitComplexToScalarConversion - Emit a conversion from the specified
105   /// complex type to the specified destination type, where the destination type
106   /// is an LLVM scalar type.
107   Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
108                                        QualType SrcTy, QualType DstTy);
109 
110   /// EmitNullValue - Emit a value that corresponds to null for the given type.
111   Value *EmitNullValue(QualType Ty);
112 
113   /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
114   Value *EmitFloatToBoolConversion(Value *V) {
115     // Compare against 0.0 for fp scalars.
116     llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
117     return Builder.CreateFCmpUNE(V, Zero, "tobool");
118   }
119 
120   /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
121   Value *EmitPointerToBoolConversion(Value *V) {
122     Value *Zero = llvm::ConstantPointerNull::get(
123                                       cast<llvm::PointerType>(V->getType()));
124     return Builder.CreateICmpNE(V, Zero, "tobool");
125   }
126 
127   Value *EmitIntToBoolConversion(Value *V) {
128     // Because of the type rules of C, we often end up computing a
129     // logical value, then zero extending it to int, then wanting it
130     // as a logical value again.  Optimize this common case.
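    // For example, when an i1 comparison result has been widened to an int and
    // is then immediately tested as a condition, we can reuse the original i1.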
131     if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
132       if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
133         Value *Result = ZI->getOperand(0);
134         // If there aren't any more uses, zap the instruction to save space.
135         // Note that there can be more uses, for example if this
136         // is the result of an assignment.
137         if (ZI->use_empty())
138           ZI->eraseFromParent();
139         return Result;
140       }
141     }
142 
143     const llvm::IntegerType *Ty = cast<llvm::IntegerType>(V->getType());
144     Value *Zero = llvm::ConstantInt::get(Ty, 0);
145     return Builder.CreateICmpNE(V, Zero, "tobool");
146   }
147 
148   //===--------------------------------------------------------------------===//
149   //                            Visitor Methods
150   //===--------------------------------------------------------------------===//
151 
152   Value *Visit(Expr *E) {
153     return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
154   }
155 
156   Value *VisitStmt(Stmt *S) {
157     S->dump(CGF.getContext().getSourceManager());
    assert(0 && "Stmt can't have scalar result type!");
159     return 0;
160   }
161   Value *VisitExpr(Expr *S);
162 
163   Value *VisitParenExpr(ParenExpr *PE) {
164     return Visit(PE->getSubExpr());
165   }
166 
167   // Leaves.
168   Value *VisitIntegerLiteral(const IntegerLiteral *E) {
169     return llvm::ConstantInt::get(VMContext, E->getValue());
170   }
171   Value *VisitFloatingLiteral(const FloatingLiteral *E) {
172     return llvm::ConstantFP::get(VMContext, E->getValue());
173   }
174   Value *VisitCharacterLiteral(const CharacterLiteral *E) {
175     return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
176   }
177   Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
178     return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
179   }
180   Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
181     return EmitNullValue(E->getType());
182   }
183   Value *VisitGNUNullExpr(const GNUNullExpr *E) {
184     return EmitNullValue(E->getType());
185   }
186   Value *VisitOffsetOfExpr(OffsetOfExpr *E);
187   Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
188   Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
189     llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
190     return Builder.CreateBitCast(V, ConvertType(E->getType()));
191   }
192 
193   Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
194     return llvm::ConstantInt::get(ConvertType(E->getType()),
195                                   E->getPackLength());
196   }
197 
198   Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
199     if (E->isGLValue())
200       return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getType());
201 
202     // Otherwise, assume the mapping is the scalar directly.
203     return CGF.getOpaqueRValueMapping(E).getScalarVal();
204   }
205 
206   // l-values.
207   Value *VisitDeclRefExpr(DeclRefExpr *E) {
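    // A DeclRefExpr that folds to a constant (e.g. a reference to an
    // enumerator, or to a const integer variable with a known initializer)
    // is emitted directly as that constant below.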
208     Expr::EvalResult Result;
209     if (!E->Evaluate(Result, CGF.getContext()))
210       return EmitLoadOfLValue(E);
211 
212     assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
213 
214     llvm::Constant *C;
215     if (Result.Val.isInt()) {
216       C = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
217     } else if (Result.Val.isFloat()) {
218       C = llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
219     } else {
220       return EmitLoadOfLValue(E);
221     }
222 
223     // Make sure we emit a debug reference to the global variable.
224     if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
225       if (!CGF.getContext().DeclMustBeEmitted(VD))
226         CGF.EmitDeclRefExprDbgValue(E, C);
227     } else if (isa<EnumConstantDecl>(E->getDecl())) {
228       CGF.EmitDeclRefExprDbgValue(E, C);
229     }
230 
231     return C;
232   }
233   Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
234     return CGF.EmitObjCSelectorExpr(E);
235   }
236   Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
237     return CGF.EmitObjCProtocolExpr(E);
238   }
239   Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
240     return EmitLoadOfLValue(E);
241   }
242   Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
243     assert(E->getObjectKind() == OK_Ordinary &&
244            "reached property reference without lvalue-to-rvalue");
245     return EmitLoadOfLValue(E);
246   }
247   Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
248     if (E->getMethodDecl() &&
249         E->getMethodDecl()->getResultType()->isReferenceType())
250       return EmitLoadOfLValue(E);
251     return CGF.EmitObjCMessageExpr(E).getScalarVal();
252   }
253 
254   Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
255     LValue LV = CGF.EmitObjCIsaExpr(E);
256     Value *V = CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
257     return V;
258   }
259 
260   Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
261   Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
262   Value *VisitMemberExpr(MemberExpr *E);
263   Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
264   Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
265     return EmitLoadOfLValue(E);
266   }
267 
268   Value *VisitInitListExpr(InitListExpr *E);
269 
270   Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
271     return CGF.CGM.EmitNullConstant(E->getType());
272   }
273   Value *VisitCastExpr(CastExpr *E) {
274     // Make sure to evaluate VLA bounds now so that we have them for later.
275     if (E->getType()->isVariablyModifiedType())
276       CGF.EmitVLASize(E->getType());
277 
278     return EmitCastExpr(E);
279   }
280   Value *EmitCastExpr(CastExpr *E);
281 
282   Value *VisitCallExpr(const CallExpr *E) {
283     if (E->getCallReturnType()->isReferenceType())
284       return EmitLoadOfLValue(E);
285 
286     return CGF.EmitCallExpr(E).getScalarVal();
287   }
288 
289   Value *VisitStmtExpr(const StmtExpr *E);
290 
291   Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
292 
293   // Unary Operators.
294   Value *VisitUnaryPostDec(const UnaryOperator *E) {
295     LValue LV = EmitLValue(E->getSubExpr());
296     return EmitScalarPrePostIncDec(E, LV, false, false);
297   }
298   Value *VisitUnaryPostInc(const UnaryOperator *E) {
299     LValue LV = EmitLValue(E->getSubExpr());
300     return EmitScalarPrePostIncDec(E, LV, true, false);
301   }
302   Value *VisitUnaryPreDec(const UnaryOperator *E) {
303     LValue LV = EmitLValue(E->getSubExpr());
304     return EmitScalarPrePostIncDec(E, LV, false, true);
305   }
306   Value *VisitUnaryPreInc(const UnaryOperator *E) {
307     LValue LV = EmitLValue(E->getSubExpr());
308     return EmitScalarPrePostIncDec(E, LV, true, true);
309   }
310 
311   llvm::Value *EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
312                                                llvm::Value *InVal,
313                                                llvm::Value *NextVal,
314                                                bool IsInc);
315 
316   llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
317                                        bool isInc, bool isPre);
318 
319 
320   Value *VisitUnaryAddrOf(const UnaryOperator *E) {
321     if (isa<MemberPointerType>(E->getType())) // never sugared
322       return CGF.CGM.getMemberPointerConstant(E);
323 
324     return EmitLValue(E->getSubExpr()).getAddress();
325   }
326   Value *VisitUnaryDeref(const UnaryOperator *E) {
327     if (E->getType()->isVoidType())
328       return Visit(E->getSubExpr()); // the actual value should be unused
329     return EmitLoadOfLValue(E);
330   }
331   Value *VisitUnaryPlus(const UnaryOperator *E) {
332     // This differs from gcc, though, most likely due to a bug in gcc.
333     TestAndClearIgnoreResultAssign();
334     return Visit(E->getSubExpr());
335   }
336   Value *VisitUnaryMinus    (const UnaryOperator *E);
337   Value *VisitUnaryNot      (const UnaryOperator *E);
338   Value *VisitUnaryLNot     (const UnaryOperator *E);
339   Value *VisitUnaryReal     (const UnaryOperator *E);
340   Value *VisitUnaryImag     (const UnaryOperator *E);
341   Value *VisitUnaryExtension(const UnaryOperator *E) {
342     return Visit(E->getSubExpr());
343   }
344 
345   // C++
346   Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
347     return Visit(DAE->getExpr());
348   }
349   Value *VisitCXXThisExpr(CXXThisExpr *TE) {
350     return CGF.LoadCXXThis();
351   }
352 
353   Value *VisitExprWithCleanups(ExprWithCleanups *E) {
354     return CGF.EmitExprWithCleanups(E).getScalarVal();
355   }
356   Value *VisitCXXNewExpr(const CXXNewExpr *E) {
357     return CGF.EmitCXXNewExpr(E);
358   }
359   Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
360     CGF.EmitCXXDeleteExpr(E);
361     return 0;
362   }
363   Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
364     return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
365   }
366 
367   Value *VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
368     return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
369   }
370 
371   Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
372     // C++ [expr.pseudo]p1:
373     //   The result shall only be used as the operand for the function call
374     //   operator (), and the result of such a call has type void. The only
375     //   effect is the evaluation of the postfix-expression before the dot or
376     //   arrow.
377     CGF.EmitScalarExpr(E->getBase());
378     return 0;
379   }
380 
381   Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
382     return EmitNullValue(E->getType());
383   }
384 
385   Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
386     CGF.EmitCXXThrowExpr(E);
387     return 0;
388   }
389 
390   Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
391     return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
392   }
393 
394   // Binary Operators.
395   Value *EmitMul(const BinOpInfo &Ops) {
396     if (Ops.Ty->hasSignedIntegerRepresentation()) {
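      // -fwrapv maps to SOB_Defined and -ftrapv to SOB_Trapping; the default
      // is SOB_Undefined, which lets us use an nsw multiply.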
397       switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
398       case LangOptions::SOB_Undefined:
399         return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
400       case LangOptions::SOB_Defined:
401         return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
402       case LangOptions::SOB_Trapping:
403         return EmitOverflowCheckedBinOp(Ops);
404       }
405     }
406 
407     if (Ops.LHS->getType()->isFPOrFPVectorTy())
408       return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
409     return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
410   }
411   bool isTrapvOverflowBehavior() {
412     return CGF.getContext().getLangOptions().getSignedOverflowBehavior()
413                == LangOptions::SOB_Trapping;
414   }
415   /// Create a binary op that checks for overflow.
416   /// Currently only supports +, - and *.
417   Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
  // Emit the overflow BB when the -ftrapv option is enabled.
419   void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
420     Builder.SetInsertPoint(overflowBB);
421     llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
422     Builder.CreateCall(Trap);
423     Builder.CreateUnreachable();
424   }
425   // Check for undefined division and modulus behaviors.
426   void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
427                                                   llvm::Value *Zero,bool isDiv);
428   Value *EmitDiv(const BinOpInfo &Ops);
429   Value *EmitRem(const BinOpInfo &Ops);
430   Value *EmitAdd(const BinOpInfo &Ops);
431   Value *EmitSub(const BinOpInfo &Ops);
432   Value *EmitShl(const BinOpInfo &Ops);
433   Value *EmitShr(const BinOpInfo &Ops);
434   Value *EmitAnd(const BinOpInfo &Ops) {
435     return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
436   }
437   Value *EmitXor(const BinOpInfo &Ops) {
438     return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
439   }
440   Value *EmitOr (const BinOpInfo &Ops) {
441     return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
442   }
443 
444   BinOpInfo EmitBinOps(const BinaryOperator *E);
445   LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
446                             Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
447                                   Value *&Result);
448 
449   Value *EmitCompoundAssign(const CompoundAssignOperator *E,
450                             Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
451 
452   // Binary operators and binary compound assignment operators.
453 #define HANDLEBINOP(OP) \
454   Value *VisitBin ## OP(const BinaryOperator *E) {                         \
455     return Emit ## OP(EmitBinOps(E));                                      \
456   }                                                                        \
457   Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
458     return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
459   }
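  // For example, HANDLEBINOP(Mul) defines VisitBinMul and VisitBinMulAssign,
  // both of which dispatch to EmitMul.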
460   HANDLEBINOP(Mul)
461   HANDLEBINOP(Div)
462   HANDLEBINOP(Rem)
463   HANDLEBINOP(Add)
464   HANDLEBINOP(Sub)
465   HANDLEBINOP(Shl)
466   HANDLEBINOP(Shr)
467   HANDLEBINOP(And)
468   HANDLEBINOP(Xor)
469   HANDLEBINOP(Or)
470 #undef HANDLEBINOP
471 
472   // Comparisons.
473   Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
474                      unsigned SICmpOpc, unsigned FCmpOpc);
475 #define VISITCOMP(CODE, UI, SI, FP) \
476     Value *VisitBin##CODE(const BinaryOperator *E) { \
477       return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
478                          llvm::FCmpInst::FP); }
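  // For example, VISITCOMP(LT, ...) defines VisitBinLT, and EmitCompare picks
  // the unsigned, signed, or floating-point predicate as appropriate.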
479   VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
480   VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
481   VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
482   VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
483   VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
484   VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
485 #undef VISITCOMP
486 
487   Value *VisitBinAssign     (const BinaryOperator *E);
488 
489   Value *VisitBinLAnd       (const BinaryOperator *E);
490   Value *VisitBinLOr        (const BinaryOperator *E);
491   Value *VisitBinComma      (const BinaryOperator *E);
492 
493   Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
494   Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
495 
496   // Other Operators.
497   Value *VisitBlockExpr(const BlockExpr *BE);
498   Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
499   Value *VisitChooseExpr(ChooseExpr *CE);
500   Value *VisitVAArgExpr(VAArgExpr *VE);
501   Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
502     return CGF.EmitObjCStringLiteral(E);
503   }
504 };
505 }  // end anonymous namespace.
506 
507 //===----------------------------------------------------------------------===//
508 //                                Utilities
509 //===----------------------------------------------------------------------===//
510 
511 /// EmitConversionToBool - Convert the specified expression value to a
512 /// boolean (i1) truth value.  This is equivalent to "Val != 0".
513 Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
514   assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
515 
516   if (SrcType->isRealFloatingType())
517     return EmitFloatToBoolConversion(Src);
518 
519   if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
520     return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
521 
522   assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
523          "Unknown scalar type to convert");
524 
525   if (isa<llvm::IntegerType>(Src->getType()))
526     return EmitIntToBoolConversion(Src);
527 
528   assert(isa<llvm::PointerType>(Src->getType()));
529   return EmitPointerToBoolConversion(Src);
530 }
531 
532 /// EmitScalarConversion - Emit a conversion from the specified type to the
533 /// specified destination type, both of which are LLVM scalar types.
534 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
535                                                QualType DstType) {
536   SrcType = CGF.getContext().getCanonicalType(SrcType);
537   DstType = CGF.getContext().getCanonicalType(DstType);
538   if (SrcType == DstType) return Src;
539 
540   if (DstType->isVoidType()) return 0;
541 
542   // Handle conversions to bool first, they are special: comparisons against 0.
543   if (DstType->isBooleanType())
544     return EmitConversionToBool(Src, SrcType);
545 
546   const llvm::Type *DstTy = ConvertType(DstType);
547 
548   // Ignore conversions like int -> uint.
549   if (Src->getType() == DstTy)
550     return Src;
551 
552   // Handle pointer conversions next: pointers can only be converted to/from
553   // other pointers and integers. Check for pointer types in terms of LLVM, as
554   // some native types (like Obj-C id) may map to a pointer type.
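  // For example, converting 'id' to 'void *' is just a bitcast between two
  // LLVM pointer types.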
555   if (isa<llvm::PointerType>(DstTy)) {
556     // The source value may be an integer, or a pointer.
557     if (isa<llvm::PointerType>(Src->getType()))
558       return Builder.CreateBitCast(Src, DstTy, "conv");
559 
560     assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
561     // First, convert to the correct width so that we control the kind of
562     // extension.
563     const llvm::Type *MiddleTy = CGF.IntPtrTy;
564     bool InputSigned = SrcType->isSignedIntegerType();
565     llvm::Value* IntResult =
566         Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
567     // Then, cast to pointer.
568     return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
569   }
570 
571   if (isa<llvm::PointerType>(Src->getType())) {
    // Must be a ptr-to-int cast.
573     assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
574     return Builder.CreatePtrToInt(Src, DstTy, "conv");
575   }
576 
577   // A scalar can be splatted to an extended vector of the same element type
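  // (e.g. assigning a float to a float4 broadcasts the value into all four
  // lanes via an insertelement followed by a shufflevector).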
578   if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
579     // Cast the scalar to element type
580     QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
581     llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
582 
583     // Insert the element in element zero of an undef vector
584     llvm::Value *UnV = llvm::UndefValue::get(DstTy);
585     llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
586     UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
587 
588     // Splat the element across to all elements
589     llvm::SmallVector<llvm::Constant*, 16> Args;
590     unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
591     for (unsigned i = 0; i != NumElements; ++i)
592       Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
593 
594     llvm::Constant *Mask = llvm::ConstantVector::get(Args);
595     llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
596     return Yay;
597   }
598 
599   // Allow bitcast from vector to integer/fp of the same size.
600   if (isa<llvm::VectorType>(Src->getType()) ||
601       isa<llvm::VectorType>(DstTy))
602     return Builder.CreateBitCast(Src, DstTy, "conv");
603 
604   // Finally, we have the arithmetic types: real int/float.
605   if (isa<llvm::IntegerType>(Src->getType())) {
606     bool InputSigned = SrcType->isSignedIntegerType();
607     if (isa<llvm::IntegerType>(DstTy))
608       return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
609     else if (InputSigned)
610       return Builder.CreateSIToFP(Src, DstTy, "conv");
611     else
612       return Builder.CreateUIToFP(Src, DstTy, "conv");
613   }
614 
615   assert(Src->getType()->isFloatingPointTy() && "Unknown real conversion");
616   if (isa<llvm::IntegerType>(DstTy)) {
617     if (DstType->isSignedIntegerType())
618       return Builder.CreateFPToSI(Src, DstTy, "conv");
619     else
620       return Builder.CreateFPToUI(Src, DstTy, "conv");
621   }
622 
623   assert(DstTy->isFloatingPointTy() && "Unknown real conversion");
624   if (DstTy->getTypeID() < Src->getType()->getTypeID())
625     return Builder.CreateFPTrunc(Src, DstTy, "conv");
626   else
627     return Builder.CreateFPExt(Src, DstTy, "conv");
628 }
629 
630 /// EmitComplexToScalarConversion - Emit a conversion from the specified complex
631 /// type to the specified destination type, where the destination type is an
632 /// LLVM scalar type.
633 Value *ScalarExprEmitter::
634 EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
635                               QualType SrcTy, QualType DstTy) {
636   // Get the source element type.
637   SrcTy = SrcTy->getAs<ComplexType>()->getElementType();
638 
639   // Handle conversions to bool first, they are special: comparisons against 0.
640   if (DstTy->isBooleanType()) {
641     //  Complex != 0  -> (Real != 0) | (Imag != 0)
642     Src.first  = EmitScalarConversion(Src.first, SrcTy, DstTy);
643     Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
644     return Builder.CreateOr(Src.first, Src.second, "tobool");
645   }
646 
647   // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
648   // the imaginary part of the complex value is discarded and the value of the
649   // real part is converted according to the conversion rules for the
650   // corresponding real type.
651   return EmitScalarConversion(Src.first, SrcTy, DstTy);
652 }
653 
654 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
655   if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
656     return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
657 
658   return llvm::Constant::getNullValue(ConvertType(Ty));
659 }
660 
661 //===----------------------------------------------------------------------===//
662 //                            Visitor Methods
663 //===----------------------------------------------------------------------===//
664 
665 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
666   CGF.ErrorUnsupported(E, "scalar expression");
667   if (E->getType()->isVoidType())
668     return 0;
669   return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
670 }
671 
672 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
673   // Vector Mask Case
674   if (E->getNumSubExprs() == 2 ||
675       (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
676     Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
677     Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
678     Value *Mask;
679 
680     const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
681     unsigned LHSElts = LTy->getNumElements();
682 
683     if (E->getNumSubExprs() == 3) {
684       Mask = CGF.EmitScalarExpr(E->getExpr(2));
685 
686       // Shuffle LHS & RHS into one input vector.
687       llvm::SmallVector<llvm::Constant*, 32> concat;
688       for (unsigned i = 0; i != LHSElts; ++i) {
689         concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i));
690         concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
691       }
692 
693       Value* CV = llvm::ConstantVector::get(concat);
694       LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
695       LHSElts *= 2;
696     } else {
697       Mask = RHS;
698     }
699 
700     const llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
701     llvm::Constant* EltMask;
702 
703     // Treat vec3 like vec4.
704     if ((LHSElts == 6) && (E->getNumSubExprs() == 3))
705       EltMask = llvm::ConstantInt::get(MTy->getElementType(),
706                                        (1 << llvm::Log2_32(LHSElts+2))-1);
707     else if ((LHSElts == 3) && (E->getNumSubExprs() == 2))
708       EltMask = llvm::ConstantInt::get(MTy->getElementType(),
709                                        (1 << llvm::Log2_32(LHSElts+1))-1);
710     else
711       EltMask = llvm::ConstantInt::get(MTy->getElementType(),
712                                        (1 << llvm::Log2_32(LHSElts))-1);
713 
714     // Mask off the high bits of each shuffle index.
715     llvm::SmallVector<llvm::Constant *, 32> MaskV;
716     for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
717       MaskV.push_back(EltMask);
718 
719     Value* MaskBits = llvm::ConstantVector::get(MaskV);
720     Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
721 
722     // newv = undef
723     // mask = mask & maskbits
724     // for each elt
725     //   n = extract mask i
726     //   x = extract val n
727     //   newv = insert newv, x, i
728     const llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
729                                                         MTy->getNumElements());
730     Value* NewV = llvm::UndefValue::get(RTy);
731     for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
732       Value *Indx = llvm::ConstantInt::get(CGF.Int32Ty, i);
733       Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
734       Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
735 
      // Handle vec3 specially: the index will be off by one for the RHS.
737       if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
738         Value *cmpIndx, *newIndx;
739         cmpIndx = Builder.CreateICmpUGT(Indx,
740                                         llvm::ConstantInt::get(CGF.Int32Ty, 3),
741                                         "cmp_shuf_idx");
742         newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(CGF.Int32Ty,1),
743                                     "shuf_idx_adj");
744         Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
745       }
746       Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
747       NewV = Builder.CreateInsertElement(NewV, VExt, Indx, "shuf_ins");
748     }
749     return NewV;
750   }
751 
752   Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
753   Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
754 
  // Handle vec3 specially: the index will be off by one for the RHS.
756   llvm::SmallVector<llvm::Constant*, 32> indices;
757   for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
758     llvm::Constant *C = cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i)));
759     const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
760     if (VTy->getNumElements() == 3) {
761       if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
762         uint64_t cVal = CI->getZExtValue();
763         if (cVal > 3) {
764           C = llvm::ConstantInt::get(C->getType(), cVal-1);
765         }
766       }
767     }
768     indices.push_back(C);
769   }
770 
771   Value *SV = llvm::ConstantVector::get(indices);
772   return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
773 }
774 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
775   Expr::EvalResult Result;
776   if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
777     if (E->isArrow())
778       CGF.EmitScalarExpr(E->getBase());
779     else
780       EmitLValue(E->getBase());
781     return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
782   }
783 
784   // Emit debug info for aggregate now, if it was delayed to reduce
785   // debug info size.
786   CGDebugInfo *DI = CGF.getDebugInfo();
787   if (DI && CGF.CGM.getCodeGenOpts().LimitDebugInfo) {
788     QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
789     if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
790       if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
791         DI->getOrCreateRecordType(PTy->getPointeeType(),
792                                   M->getParent()->getLocation());
793   }
794   return EmitLoadOfLValue(E);
795 }
796 
797 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
798   TestAndClearIgnoreResultAssign();
799 
  // Emit subscript expressions in rvalue contexts.  For most cases, this just
801   // loads the lvalue formed by the subscript expr.  However, we have to be
802   // careful, because the base of a vector subscript is occasionally an rvalue,
803   // so we can't get it as an lvalue.
804   if (!E->getBase()->getType()->isVectorType())
805     return EmitLoadOfLValue(E);
806 
807   // Handle the vector case.  The base must be a vector, the index must be an
808   // integer value.
809   Value *Base = Visit(E->getBase());
810   Value *Idx  = Visit(E->getIdx());
811   bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
812   Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
813   return Builder.CreateExtractElement(Base, Idx, "vecext");
814 }
815 
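/// getMaskElt - Return element Idx of the shuffle mask of SVI as an i32
/// constant, offset by Off; an undefined mask element (-1) stays undef.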
816 static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
817                                   unsigned Off, const llvm::Type *I32Ty) {
818   int MV = SVI->getMaskValue(Idx);
819   if (MV == -1)
820     return llvm::UndefValue::get(I32Ty);
821   return llvm::ConstantInt::get(I32Ty, Off+MV);
822 }
823 
824 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
825   bool Ignore = TestAndClearIgnoreResultAssign();
826   (void)Ignore;
  assert(Ignore == false && "init list ignored");
828   unsigned NumInitElements = E->getNumInits();
829 
830   if (E->hadArrayRangeDesignator())
831     CGF.ErrorUnsupported(E, "GNU array range designator extension");
832 
833   const llvm::VectorType *VType =
834     dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
835 
836   // We have a scalar in braces. Just use the first element.
837   if (!VType)
838     return Visit(E->getInit(0));
839 
840   unsigned ResElts = VType->getNumElements();
841 
842   // Loop over initializers collecting the Value for each, and remembering
  // whether the source was a swizzle (ExtVectorElementExpr).  This will allow
844   // us to fold the shuffle for the swizzle into the shuffle for the vector
845   // initializer, since LLVM optimizers generally do not want to touch
846   // shuffles.
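  // For example, a float4 initialized from elements of another float4
  // (a.x, a.y, ...) can often be emitted as a single shufflevector rather
  // than a chain of extractelement/insertelement pairs.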
847   unsigned CurIdx = 0;
848   bool VIsUndefShuffle = false;
849   llvm::Value *V = llvm::UndefValue::get(VType);
850   for (unsigned i = 0; i != NumInitElements; ++i) {
851     Expr *IE = E->getInit(i);
852     Value *Init = Visit(IE);
853     llvm::SmallVector<llvm::Constant*, 16> Args;
854 
855     const llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
856 
857     // Handle scalar elements.  If the scalar initializer is actually one
858     // element of a different vector of the same width, use shuffle instead of
859     // extract+insert.
860     if (!VVT) {
861       if (isa<ExtVectorElementExpr>(IE)) {
862         llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
863 
864         if (EI->getVectorOperandType()->getNumElements() == ResElts) {
865           llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
866           Value *LHS = 0, *RHS = 0;
867           if (CurIdx == 0) {
868             // insert into undef -> shuffle (src, undef)
869             Args.push_back(C);
870             for (unsigned j = 1; j != ResElts; ++j)
871               Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
872 
873             LHS = EI->getVectorOperand();
874             RHS = V;
875             VIsUndefShuffle = true;
876           } else if (VIsUndefShuffle) {
877             // insert into undefshuffle && size match -> shuffle (v, src)
878             llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
879             for (unsigned j = 0; j != CurIdx; ++j)
880               Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
881             Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
882                                                   ResElts + C->getZExtValue()));
883             for (unsigned j = CurIdx + 1; j != ResElts; ++j)
884               Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
885 
886             LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
887             RHS = EI->getVectorOperand();
888             VIsUndefShuffle = false;
889           }
890           if (!Args.empty()) {
891             llvm::Constant *Mask = llvm::ConstantVector::get(Args);
892             V = Builder.CreateShuffleVector(LHS, RHS, Mask);
893             ++CurIdx;
894             continue;
895           }
896         }
897       }
898       Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
899       V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
900       VIsUndefShuffle = false;
901       ++CurIdx;
902       continue;
903     }
904 
905     unsigned InitElts = VVT->getNumElements();
906 
907     // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
908     // input is the same width as the vector being constructed, generate an
909     // optimized shuffle of the swizzle input into the result.
910     unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
911     if (isa<ExtVectorElementExpr>(IE)) {
912       llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
913       Value *SVOp = SVI->getOperand(0);
914       const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
915 
916       if (OpTy->getNumElements() == ResElts) {
917         for (unsigned j = 0; j != CurIdx; ++j) {
918           // If the current vector initializer is a shuffle with undef, merge
919           // this shuffle directly into it.
920           if (VIsUndefShuffle) {
921             Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
922                                       CGF.Int32Ty));
923           } else {
924             Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
925           }
926         }
927         for (unsigned j = 0, je = InitElts; j != je; ++j)
928           Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
929         for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
930           Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
931 
932         if (VIsUndefShuffle)
933           V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
934 
935         Init = SVOp;
936       }
937     }
938 
939     // Extend init to result vector length, and then shuffle its contribution
940     // to the vector initializer into V.
941     if (Args.empty()) {
942       for (unsigned j = 0; j != InitElts; ++j)
943         Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
944       for (unsigned j = InitElts; j != ResElts; ++j)
945         Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
946       llvm::Constant *Mask = llvm::ConstantVector::get(Args);
947       Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
948                                          Mask, "vext");
949 
950       Args.clear();
951       for (unsigned j = 0; j != CurIdx; ++j)
952         Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
953       for (unsigned j = 0; j != InitElts; ++j)
954         Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j+Offset));
955       for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
956         Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
957     }
958 
959     // If V is undef, make sure it ends up on the RHS of the shuffle to aid
960     // merging subsequent shuffles into this one.
961     if (CurIdx == 0)
962       std::swap(V, Init);
963     llvm::Constant *Mask = llvm::ConstantVector::get(Args);
964     V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
965     VIsUndefShuffle = isa<llvm::UndefValue>(Init);
966     CurIdx += InitElts;
967   }
968 
969   // FIXME: evaluate codegen vs. shuffling against constant null vector.
971   const llvm::Type *EltTy = VType->getElementType();
972 
973   // Emit remaining default initializers
974   for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
975     Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
976     llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
977     V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
978   }
979   return V;
980 }
981 
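/// ShouldNullCheckClassCastValue - Return true if a base-to-derived or
/// derived-to-base pointer conversion of CE's operand must preserve null,
/// i.e. whether the address adjustment needs to be guarded by a null check.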
982 static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
983   const Expr *E = CE->getSubExpr();
984 
985   if (CE->getCastKind() == CK_UncheckedDerivedToBase)
986     return false;
987 
988   if (isa<CXXThisExpr>(E)) {
989     // We always assume that 'this' is never null.
990     return false;
991   }
992 
993   if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
994     // And that glvalue casts are never null.
995     if (ICE->getValueKind() != VK_RValue)
996       return false;
997   }
998 
999   return true;
1000 }
1001 
// EmitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
1005 Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
1006   Expr *E = CE->getSubExpr();
1007   QualType DestTy = CE->getType();
1008   CastKind Kind = CE->getCastKind();
1009 
1010   if (!DestTy->isVoidType())
1011     TestAndClearIgnoreResultAssign();
1012 
1013   // Since almost all cast kinds apply to scalars, this switch doesn't have
1014   // a default case, so the compiler will warn on a missing case.  The cases
1015   // are in the same order as in the CastKind enum.
1016   switch (Kind) {
1017   case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1018 
1019   case CK_LValueBitCast:
1020   case CK_ObjCObjectLValueCast: {
1021     Value *V = EmitLValue(E).getAddress();
1022     V = Builder.CreateBitCast(V,
1023                           ConvertType(CGF.getContext().getPointerType(DestTy)));
1024     return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), DestTy);
1025   }
1026 
1027   case CK_AnyPointerToObjCPointerCast:
1028   case CK_AnyPointerToBlockPointerCast:
1029   case CK_BitCast: {
1030     Value *Src = Visit(const_cast<Expr*>(E));
1031     return Builder.CreateBitCast(Src, ConvertType(DestTy));
1032   }
1033   case CK_NoOp:
1034   case CK_UserDefinedConversion:
1035     return Visit(const_cast<Expr*>(E));
1036 
1037   case CK_BaseToDerived: {
1038     const CXXRecordDecl *DerivedClassDecl =
1039       DestTy->getCXXRecordDeclForPointerType();
1040 
1041     return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
1042                                         CE->path_begin(), CE->path_end(),
1043                                         ShouldNullCheckClassCastValue(CE));
1044   }
1045   case CK_UncheckedDerivedToBase:
1046   case CK_DerivedToBase: {
1047     const RecordType *DerivedClassTy =
1048       E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
1049     CXXRecordDecl *DerivedClassDecl =
1050       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
1051 
1052     return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
1053                                      CE->path_begin(), CE->path_end(),
1054                                      ShouldNullCheckClassCastValue(CE));
1055   }
1056   case CK_Dynamic: {
1057     Value *V = Visit(const_cast<Expr*>(E));
1058     const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
1059     return CGF.EmitDynamicCast(V, DCE);
1060   }
1061 
1062   case CK_ArrayToPointerDecay: {
1063     assert(E->getType()->isArrayType() &&
1064            "Array to pointer decay must have array source type!");
1065 
1066     Value *V = EmitLValue(E).getAddress();  // Bitfields can't be arrays.
1067 
1068     // Note that VLA pointers are always decayed, so we don't need to do
1069     // anything here.
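    // For a constant-size array, e.g. "int a[10]; int *p = a;", decay is just
    // a GEP to the first element of the array object.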
1070     if (!E->getType()->isVariableArrayType()) {
1071       assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
1072       assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
1073                                  ->getElementType()) &&
1074              "Expected pointer to array");
1075       V = Builder.CreateStructGEP(V, 0, "arraydecay");
1076     }
1077 
1078     return V;
1079   }
1080   case CK_FunctionToPointerDecay:
1081     return EmitLValue(E).getAddress();
1082 
1083   case CK_NullToPointer:
1084     if (MustVisitNullValue(E))
1085       (void) Visit(E);
1086 
1087     return llvm::ConstantPointerNull::get(
1088                                cast<llvm::PointerType>(ConvertType(DestTy)));
1089 
1090   case CK_NullToMemberPointer: {
1091     if (MustVisitNullValue(E))
1092       (void) Visit(E);
1093 
1094     const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
1095     return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
1096   }
1097 
1098   case CK_BaseToDerivedMemberPointer:
1099   case CK_DerivedToBaseMemberPointer: {
1100     Value *Src = Visit(E);
1101 
1102     // Note that the AST doesn't distinguish between checked and
1103     // unchecked member pointer conversions, so we always have to
1104     // implement checked conversions here.  This is inefficient when
1105     // actual control flow may be required in order to perform the
1106     // check, which it is for data member pointers (but not member
1107     // function pointers on Itanium and ARM).
1108     return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
1109   }
1110 
1111   case CK_FloatingRealToComplex:
1112   case CK_FloatingComplexCast:
1113   case CK_IntegralRealToComplex:
1114   case CK_IntegralComplexCast:
1115   case CK_IntegralComplexToFloatingComplex:
1116   case CK_FloatingComplexToIntegralComplex:
1117   case CK_ConstructorConversion:
1118   case CK_ToUnion:
1119     llvm_unreachable("scalar cast to non-scalar value");
1120     break;
1121 
1122   case CK_GetObjCProperty: {
1123     assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
1124     assert(E->isGLValue() && E->getObjectKind() == OK_ObjCProperty &&
1125            "CK_GetObjCProperty for non-lvalue or non-ObjCProperty");
1126     RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType());
1127     return RV.getScalarVal();
1128   }
1129 
1130   case CK_LValueToRValue:
1131     assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
1132     assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
1133     return Visit(const_cast<Expr*>(E));
1134 
1135   case CK_IntegralToPointer: {
1136     Value *Src = Visit(const_cast<Expr*>(E));
1137 
1138     // First, convert to the correct width so that we control the kind of
1139     // extension.
1140     const llvm::Type *MiddleTy = CGF.IntPtrTy;
1141     bool InputSigned = E->getType()->isSignedIntegerType();
1142     llvm::Value* IntResult =
1143       Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1144 
1145     return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
1146   }
1147   case CK_PointerToIntegral: {
1148     Value *Src = Visit(const_cast<Expr*>(E));
1149 
1150     // Handle conversion to bool correctly.
1151     if (DestTy->isBooleanType())
1152       return EmitScalarConversion(Src, E->getType(), DestTy);
1153 
1154     return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
1155   }
1156   case CK_ToVoid: {
1157     CGF.EmitIgnoredExpr(E);
1158     return 0;
1159   }
1160   case CK_VectorSplat: {
1161     const llvm::Type *DstTy = ConvertType(DestTy);
1162     Value *Elt = Visit(const_cast<Expr*>(E));
1163 
1164     // Insert the element in element zero of an undef vector
1165     llvm::Value *UnV = llvm::UndefValue::get(DstTy);
1166     llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
1167     UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
1168 
1169     // Splat the element across to all elements
1170     llvm::SmallVector<llvm::Constant*, 16> Args;
1171     unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
1172     llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Int32Ty, 0);
1173     for (unsigned i = 0; i < NumElements; i++)
1174       Args.push_back(Zero);
1175 
1176     llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1177     llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
1178     return Yay;
1179   }
1180 
1181   case CK_IntegralCast:
1182   case CK_IntegralToFloating:
1183   case CK_FloatingToIntegral:
1184   case CK_FloatingCast:
1185     return EmitScalarConversion(Visit(E), E->getType(), DestTy);
1186 
1187   case CK_IntegralToBoolean:
1188     return EmitIntToBoolConversion(Visit(E));
1189   case CK_PointerToBoolean:
1190     return EmitPointerToBoolConversion(Visit(E));
1191   case CK_FloatingToBoolean:
1192     return EmitFloatToBoolConversion(Visit(E));
1193   case CK_MemberPointerToBoolean: {
1194     llvm::Value *MemPtr = Visit(E);
1195     const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
1196     return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
1197   }
1198 
1199   case CK_FloatingComplexToReal:
1200   case CK_IntegralComplexToReal:
1201     return CGF.EmitComplexExpr(E, false, true).first;
1202 
1203   case CK_FloatingComplexToBoolean:
1204   case CK_IntegralComplexToBoolean: {
1205     CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
1206 
1207     // TODO: kill this function off, inline appropriate case here
1208     return EmitComplexToScalarConversion(V, E->getType(), DestTy);
1209   }
1210 
1211   }
1212 
1213   llvm_unreachable("unknown scalar cast");
1214   return 0;
1215 }
1216 
1217 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
1218   CodeGenFunction::StmtExprEvaluation eval(CGF);
1219   return CGF.EmitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType())
1220     .getScalarVal();
1221 }
1222 
1223 Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
1224   LValue LV = CGF.EmitBlockDeclRefLValue(E);
1225   return CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
1226 }
1227 
1228 //===----------------------------------------------------------------------===//
1229 //                             Unary Operators
1230 //===----------------------------------------------------------------------===//
1231 
1232 llvm::Value *ScalarExprEmitter::
1233 EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
1234                                 llvm::Value *InVal,
1235                                 llvm::Value *NextVal, bool IsInc) {
1236   switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
1237   case LangOptions::SOB_Undefined:
1238     return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
1239     break;
1240   case LangOptions::SOB_Defined:
1241     return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
1242     break;
1243   case LangOptions::SOB_Trapping:
1244     BinOpInfo BinOp;
1245     BinOp.LHS = InVal;
1246     BinOp.RHS = NextVal;
1247     BinOp.Ty = E->getType();
1248     BinOp.Opcode = BO_Add;
1249     BinOp.E = E;
1250     return EmitOverflowCheckedBinOp(BinOp);
1251     break;
1252   }
1253   assert(false && "Unknown SignedOverflowBehaviorTy");
1254   return 0;
1255 }
1256 
1257 llvm::Value *
1258 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1259                                            bool isInc, bool isPre) {
1260 
1261   QualType type = E->getSubExpr()->getType();
1262   llvm::Value *value = EmitLoadOfLValue(LV, type);
1263   llvm::Value *input = value;
1264 
1265   int amount = (isInc ? 1 : -1);
1266 
1267   // Special case of integer increment that we have to check first: bool++.
1268   // Due to promotion rules, we get:
1269   //   bool++ -> bool = bool + 1
1270   //          -> bool = (int)bool + 1
1271   //          -> bool = ((int)bool + 1 != 0)
1272   // An interesting aspect of this is that increment is always true.
1273   // Decrement does not have this property.
1274   if (isInc && type->isBooleanType()) {
1275     value = Builder.getTrue();
1276 
1277   // Most common case by far: integer increment.
1278   } else if (type->isIntegerType()) {
1279 
1280     llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
1281 
1282     // Note that signed integer inc/dec with width less than int can't
1283     // overflow because of promotion rules; we're just eliding a few steps here.
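    // For example, "short s; s++" is really "(short)((int)s + 1)", and the
    // addition cannot overflow in int.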
1284     if (type->isSignedIntegerType() &&
1285         value->getType()->getPrimitiveSizeInBits() >=
1286             CGF.CGM.IntTy->getBitWidth())
1287       value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
1288     else
1289       value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
1290 
1291   // Next most common: pointer increment.
1292   } else if (const PointerType *ptr = type->getAs<PointerType>()) {
1293     QualType type = ptr->getPointeeType();
1294 
1295     // VLA types don't have constant size.
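    // For example, "int (*p)[n]; ++p" must advance the pointer by
    // n * sizeof(int) bytes, computed at run time.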
1296     if (type->isVariableArrayType()) {
1297       llvm::Value *vlaSize =
1298         CGF.GetVLASize(CGF.getContext().getAsVariableArrayType(type));
1299       value = CGF.EmitCastToVoidPtr(value);
1300       if (!isInc) vlaSize = Builder.CreateNSWNeg(vlaSize, "vla.negsize");
1301       if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
1302         value = Builder.CreateGEP(value, vlaSize, "vla.inc");
1303       else
1304         value = Builder.CreateInBoundsGEP(value, vlaSize, "vla.inc");
1305       value = Builder.CreateBitCast(value, input->getType());
1306 
1307     // Arithmetic on function pointers (!) is just +-1.
1308     } else if (type->isFunctionType()) {
1309       llvm::Value *amt = llvm::ConstantInt::get(CGF.Int32Ty, amount);
1310 
1311       value = CGF.EmitCastToVoidPtr(value);
1312       if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
1313         value = Builder.CreateGEP(value, amt, "incdec.funcptr");
1314       else
1315         value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
1316       value = Builder.CreateBitCast(value, input->getType());
1317 
1318     // For everything else, we can just do a simple increment.
1319     } else {
1320       llvm::Value *amt = llvm::ConstantInt::get(CGF.Int32Ty, amount);
1321       if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
1322         value = Builder.CreateGEP(value, amt, "incdec.ptr");
1323       else
1324         value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
1325     }
1326 
1327   // Vector increment/decrement.
1328   } else if (type->isVectorType()) {
1329     if (type->hasIntegerRepresentation()) {
1330       llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
1331 
1332       if (type->hasSignedIntegerRepresentation())
1333         value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
1334       else
1335         value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
1336     } else {
1337       value = Builder.CreateFAdd(
1338                   value,
1339                   llvm::ConstantFP::get(value->getType(), amount),
1340                   isInc ? "inc" : "dec");
1341     }
1342 
1343   // Floating point.
1344   } else if (type->isRealFloatingType()) {
1345     // Add the inc/dec to the real part.
1346     llvm::Value *amt;
1347     if (value->getType()->isFloatTy())
1348       amt = llvm::ConstantFP::get(VMContext,
1349                                   llvm::APFloat(static_cast<float>(amount)));
1350     else if (value->getType()->isDoubleTy())
1351       amt = llvm::ConstantFP::get(VMContext,
1352                                   llvm::APFloat(static_cast<double>(amount)));
1353     else {
1354       llvm::APFloat F(static_cast<float>(amount));
1355       bool ignored;
1356       F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
1357                 &ignored);
1358       amt = llvm::ConstantFP::get(VMContext, F);
1359     }
1360     value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
1361 
1362   // Objective-C pointer types.
1363   } else {
1364     const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
1365     value = CGF.EmitCastToVoidPtr(value);
1366 
1367     CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
1368     if (!isInc) size = -size;
1369     llvm::Value *sizeValue =
1370       llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
1371 
1372     if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
1373       value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
1374     else
1375       value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
1376     value = Builder.CreateBitCast(value, input->getType());
1377   }
1378 
1379   // Store the updated result through the lvalue.
1380   if (LV.isBitField())
1381     CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, type, &value);
1382   else
1383     CGF.EmitStoreThroughLValue(RValue::get(value), LV, type);
1384 
1385   // If this is a postinc, return the value read from memory, otherwise use the
1386   // updated value.
1387   return isPre ? value : input;
1388 }
1389 
1390 
1391 
1392 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
1393   TestAndClearIgnoreResultAssign();
1394   // Emit unary minus with EmitSub so we handle overflow cases etc.
1395   BinOpInfo BinOp;
1396   BinOp.RHS = Visit(E->getSubExpr());
1397 
1398   if (BinOp.RHS->getType()->isFPOrFPVectorTy())
1399     BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
1400   else
1401     BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
1402   BinOp.Ty = E->getType();
1403   BinOp.Opcode = BO_Sub;
1404   BinOp.E = E;
1405   return EmitSub(BinOp);
1406 }
1407 
1408 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
1409   TestAndClearIgnoreResultAssign();
1410   Value *Op = Visit(E->getSubExpr());
1411   return Builder.CreateNot(Op, "neg");
1412 }
1413 
1414 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
1415   // Compare operand to zero.
1416   Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
1417 
1418   // Invert value.
1419   // TODO: Could dynamically modify easy computations here.  For example, if
1420   // the operand is an icmp ne, turn into icmp eq.
1421   BoolVal = Builder.CreateNot(BoolVal, "lnot");
1422 
1423   // ZExt result to the expr type.
1424   return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
1425 }
1426 
1427 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
1428   // Try folding the offsetof to a constant.
1429   Expr::EvalResult EvalResult;
1430   if (E->Evaluate(EvalResult, CGF.getContext()))
1431     return llvm::ConstantInt::get(VMContext, EvalResult.Val.getInt());
1432 
1433   // Loop over the components of the offsetof to compute the value.
1434   unsigned n = E->getNumComponents();
1435   const llvm::Type* ResultType = ConvertType(E->getType());
1436   llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
1437   QualType CurrentType = E->getTypeSourceInfo()->getType();
1438   for (unsigned i = 0; i != n; ++i) {
1439     OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
1440     llvm::Value *Offset = 0;
1441     switch (ON.getKind()) {
1442     case OffsetOfExpr::OffsetOfNode::Array: {
1443       // Compute the index
1444       Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
1445       llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
1446       bool IdxSigned = IdxExpr->getType()->isSignedIntegerType();
1447       Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
1448 
1449       // Save the element type
1450       CurrentType =
1451           CGF.getContext().getAsArrayType(CurrentType)->getElementType();
1452 
1453       // Compute the element size
1454       llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
1455           CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
1456 
1457       // Multiply out to compute the result
1458       Offset = Builder.CreateMul(Idx, ElemSize);
1459       break;
1460     }
1461 
1462     case OffsetOfExpr::OffsetOfNode::Field: {
1463       FieldDecl *MemberDecl = ON.getField();
1464       RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
1465       const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
1466 
1467       // Compute the index of the field in its parent.
1468       unsigned i = 0;
1469       // FIXME: It would be nice if we didn't have to loop here!
1470       for (RecordDecl::field_iterator Field = RD->field_begin(),
1471                                       FieldEnd = RD->field_end();
1472            Field != FieldEnd; (void)++Field, ++i) {
1473         if (*Field == MemberDecl)
1474           break;
1475       }
1476       assert(i < RL.getFieldCount() && "offsetof field in wrong type");
1477 
1478       // Compute the offset to the field
1479       int64_t OffsetInt = RL.getFieldOffset(i) /
1480                           CGF.getContext().getCharWidth();
1481       Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
1482 
1483       // Save the element type.
1484       CurrentType = MemberDecl->getType();
1485       break;
1486     }
1487 
1488     case OffsetOfExpr::OffsetOfNode::Identifier:
1489       llvm_unreachable("dependent __builtin_offsetof");
1490 
1491     case OffsetOfExpr::OffsetOfNode::Base: {
1492       if (ON.getBase()->isVirtual()) {
1493         CGF.ErrorUnsupported(E, "virtual base in offsetof");
1494         continue;
1495       }
1496 
1497       RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
1498       const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
1499 
1500       // Save the element type.
1501       CurrentType = ON.getBase()->getType();
1502 
1503       // Compute the offset to the base.
1504       const RecordType *BaseRT = CurrentType->getAs<RecordType>();
1505       CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
1506       int64_t OffsetInt = RL.getBaseClassOffsetInBits(BaseRD) /
1507                           CGF.getContext().getCharWidth();
1508       Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
1509       break;
1510     }
1511     }
1512     Result = Builder.CreateAdd(Result, Offset);
1513   }
1514   return Result;
1515 }
1516 
/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
/// the argument of a sizeof/alignof expression as an integer.
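/// For example, given 'int n = 10; int vla[n];', 'sizeof vla' is a run-time
/// value (n * sizeof(int)) rather than a compile-time constant.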
1519 Value *
1520 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
1521                               const UnaryExprOrTypeTraitExpr *E) {
1522   QualType TypeToSize = E->getTypeOfArgument();
1523   if (E->getKind() == UETT_SizeOf) {
1524     if (const VariableArrayType *VAT =
1525           CGF.getContext().getAsVariableArrayType(TypeToSize)) {
1526       if (E->isArgumentType()) {
1527         // sizeof(type) - make sure to emit the VLA size.
1528         CGF.EmitVLASize(TypeToSize);
1529       } else {
1530         // C99 6.5.3.4p2: If the argument is an expression of type
1531         // VLA, it is evaluated.
1532         CGF.EmitIgnoredExpr(E->getArgumentExpr());
1533       }
1534 
1535       return CGF.GetVLASize(VAT);
1536     }
1537   }
1538 
1539   // If this isn't sizeof(vla), the result must be constant; use the constant
1540   // folding logic so we don't have to duplicate it here.
1541   Expr::EvalResult Result;
1542   E->Evaluate(Result, CGF.getContext());
1543   return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
1544 }
1545 
1546 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
1547   Expr *Op = E->getSubExpr();
1548   if (Op->getType()->isAnyComplexType()) {
1549     // If it's an l-value, load through the appropriate subobject l-value.
1550     // Note that we have to ask E because Op might be an l-value that
1551     // this won't work for, e.g. an Obj-C property.
1552     if (E->isGLValue())
1553       return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
1554                 .getScalarVal();
1555 
1556     // Otherwise, calculate and project.
1557     return CGF.EmitComplexExpr(Op, false, true).first;
1558   }
1559 
1560   return Visit(Op);
1561 }
1562 
1563 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
1564   Expr *Op = E->getSubExpr();
1565   if (Op->getType()->isAnyComplexType()) {
1566     // If it's an l-value, load through the appropriate subobject l-value.
1567     // Note that we have to ask E because Op might be an l-value that
1568     // this won't work for, e.g. an Obj-C property.
1569     if (Op->isGLValue())
1570       return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
1571                 .getScalarVal();
1572 
1573     // Otherwise, calculate and project.
1574     return CGF.EmitComplexExpr(Op, true, false).second;
1575   }
1576 
1577   // __imag on a scalar returns zero.  Emit the subexpr to ensure side
1578   // effects are evaluated, but not the actual value.
1579   CGF.EmitScalarExpr(Op, true);
1580   return llvm::Constant::getNullValue(ConvertType(E->getType()));
1581 }
1582 
1583 //===----------------------------------------------------------------------===//
1584 //                           Binary Operators
1585 //===----------------------------------------------------------------------===//
1586 
1587 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
1588   TestAndClearIgnoreResultAssign();
1589   BinOpInfo Result;
1590   Result.LHS = Visit(E->getLHS());
1591   Result.RHS = Visit(E->getRHS());
1592   Result.Ty  = E->getType();
1593   Result.Opcode = E->getOpcode();
1594   Result.E = E;
1595   return Result;
1596 }
1597 
1598 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
1599                                               const CompoundAssignOperator *E,
1600                         Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
1601                                                    Value *&Result) {
1602   QualType LHSTy = E->getLHS()->getType();
1603   BinOpInfo OpInfo;
1604 
1605   if (E->getComputationResultType()->isAnyComplexType()) {
1606     // This needs to go through the complex expression emitter, but it's a tad
1607     // complicated to do that... I'm leaving it out for now.  (Note that we do
1608     // actually need the imaginary part of the RHS for multiplication and
1609     // division.)
1610     CGF.ErrorUnsupported(E, "complex compound assignment");
1611     Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1612     return LValue();
1613   }
1614 
1615   // Emit the RHS first.  __block variables need to have the rhs evaluated
1616   // first, plus this should improve codegen a little.
1617   OpInfo.RHS = Visit(E->getRHS());
1618   OpInfo.Ty = E->getComputationResultType();
1619   OpInfo.Opcode = E->getOpcode();
1620   OpInfo.E = E;
1621   // Load/convert the LHS.
1622   LValue LHSLV = EmitCheckedLValue(E->getLHS());
1623   OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
1624   OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
1625                                     E->getComputationLHSType());
1626 
1627   // Expand the binary operator.
1628   Result = (this->*Func)(OpInfo);
1629 
1630   // Convert the result back to the LHS type.
1631   Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
1632 
1633   // Store the result value into the LHS lvalue. Bit-fields are handled
1634   // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1635   // 'An assignment expression has the value of the left operand after the
1636   // assignment...'.
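  // e.g. for 'struct { unsigned b : 3; } s;' with s.b == 7, 's.b += 1' stores
  // 0 and the value of the whole expression is 0, not 8.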
1637   if (LHSLV.isBitField())
1638     CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
1639                                        &Result);
1640   else
1641     CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
1642 
1643   return LHSLV;
1644 }
1645 
1646 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
1647                       Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
1648   bool Ignore = TestAndClearIgnoreResultAssign();
1649   Value *RHS;
1650   LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
1651 
1652   // If the result is clearly ignored, return now.
1653   if (Ignore)
1654     return 0;
1655 
1656   // The result of an assignment in C is the assigned r-value.
1657   if (!CGF.getContext().getLangOptions().CPlusPlus)
1658     return RHS;
1659 
1660   // Objective-C property assignment never reloads the value following a store.
1661   if (LHS.isPropertyRef())
1662     return RHS;
1663 
1664   // If the lvalue is non-volatile, return the computed value of the assignment.
1665   if (!LHS.isVolatileQualified())
1666     return RHS;
1667 
1668   // Otherwise, reload the value.
1669   return EmitLoadOfLValue(LHS, E->getType());
1670 }
1671 
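// Emit a runtime check, used when signed overflow has trapping behavior, that
// branches to a trap block on division or remainder by zero and, for signed
// types, on INT_MIN / -1, whose result is not representable.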
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
1675   llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
1676   llvm::BasicBlock *contBB =
1677     CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn);
1678 
1679   const llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
1680 
1681   if (Ops.Ty->hasSignedIntegerRepresentation()) {
1682     llvm::Value *IntMin =
1683       llvm::ConstantInt::get(VMContext,
1684                              llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
1685     llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
1686 
1687     llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
1688     llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
1689     llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
1690     llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
1691     Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
1692                          overflowBB, contBB);
1693   } else {
1694     CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
1695                              overflowBB, contBB);
1696   }
1697   EmitOverflowBB(overflowBB);
1698   Builder.SetInsertPoint(contBB);
1699 }
1700 
1701 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
1702   if (isTrapvOverflowBehavior()) {
1703     llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
1704 
1705     if (Ops.Ty->isIntegerType())
1706       EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
1707     else if (Ops.Ty->isRealFloatingType()) {
1708       llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
1709                                                           CGF.CurFn);
1710       llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn);
1711       CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
1712                                overflowBB, DivCont);
1713       EmitOverflowBB(overflowBB);
1714       Builder.SetInsertPoint(DivCont);
1715     }
1716   }
1717   if (Ops.LHS->getType()->isFPOrFPVectorTy())
1718     return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
1719   else if (Ops.Ty->hasUnsignedIntegerRepresentation())
1720     return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
1721   else
1722     return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
1723 }
1724 
1725 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
1726   // Rem in C can't be a floating point type: C99 6.5.5p2.
1727   if (isTrapvOverflowBehavior()) {
1728     llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
1729 
1730     if (Ops.Ty->isIntegerType())
1731       EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
1732   }
1733 
1734   if (Ops.Ty->hasUnsignedIntegerRepresentation())
1735     return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
1736   else
1737     return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
1738 }
1739 
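// Emit an add, sub, or mul using the llvm.s{add,sub,mul}.with.overflow
// intrinsics.  On overflow we either trap or, if LangOptions::OverflowHandler
// names a runtime function, call that handler and use the value it returns.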
1740 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
1741   unsigned IID;
1742   unsigned OpID = 0;
1743 
1744   switch (Ops.Opcode) {
1745   case BO_Add:
1746   case BO_AddAssign:
1747     OpID = 1;
1748     IID = llvm::Intrinsic::sadd_with_overflow;
1749     break;
1750   case BO_Sub:
1751   case BO_SubAssign:
1752     OpID = 2;
1753     IID = llvm::Intrinsic::ssub_with_overflow;
1754     break;
1755   case BO_Mul:
1756   case BO_MulAssign:
1757     OpID = 3;
1758     IID = llvm::Intrinsic::smul_with_overflow;
1759     break;
1760   default:
1761     assert(false && "Unsupported operation for overflow detection");
1762     IID = 0;
1763   }
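  // Encode the operation for the overflow handler as (OpID << 1) | 1, i.e. 3
  // for add, 5 for sub, and 7 for mul.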
1764   OpID <<= 1;
1765   OpID |= 1;
1766 
1767   const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
1768 
1769   llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
1770 
1771   Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
1772   Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
1773   Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
1774 
1775   // Branch in case of overflow.
1776   llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
1777   llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
1778   llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn);
1779 
1780   Builder.CreateCondBr(overflow, overflowBB, continueBB);
1781 
1782   // Handle overflow with llvm.trap.
1783   const std::string *handlerName =
1784     &CGF.getContext().getLangOptions().OverflowHandler;
1785   if (handlerName->empty()) {
1786     EmitOverflowBB(overflowBB);
1787     Builder.SetInsertPoint(continueBB);
1788     return result;
1789   }
1790 
1791   // If an overflow handler is set, then we want to call it and then use its
1792   // result, if it returns.
1793   Builder.SetInsertPoint(overflowBB);
1794 
1795   // Get the overflow handler.
1796   const llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
1797   std::vector<const llvm::Type*> argTypes;
1798   argTypes.push_back(CGF.Int64Ty); argTypes.push_back(CGF.Int64Ty);
1799   argTypes.push_back(Int8Ty); argTypes.push_back(Int8Ty);
1800   llvm::FunctionType *handlerTy =
1801       llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
1802   llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
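  // In C terms the handler is declared roughly as
  //   long long handler(long long, long long, char op, char width, ...);
  // (note that the function type built above is variadic).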
1803 
1804   // Sign extend the args to 64-bit, so that we can use the same handler for
1805   // all types of overflow.
1806   llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
1807   llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
1808 
1809   // Call the handler with the two arguments, the operation, and the size of
1810   // the result.
1811   llvm::Value *handlerResult = Builder.CreateCall4(handler, lhs, rhs,
1812       Builder.getInt8(OpID),
1813       Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()));
1814 
1815   // Truncate the result back to the desired size.
1816   handlerResult = Builder.CreateTrunc(handlerResult, opTy);
1817   Builder.CreateBr(continueBB);
1818 
1819   Builder.SetInsertPoint(continueBB);
1820   llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
1821   phi->addIncoming(result, initialBB);
1822   phi->addIncoming(handlerResult, overflowBB);
1823 
1824   return phi;
1825 }
1826 
1827 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
1828   if (!Ops.Ty->isAnyPointerType()) {
1829     if (Ops.Ty->hasSignedIntegerRepresentation()) {
1830       switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
1831       case LangOptions::SOB_Undefined:
1832         return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
1833       case LangOptions::SOB_Defined:
1834         return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
1835       case LangOptions::SOB_Trapping:
1836         return EmitOverflowCheckedBinOp(Ops);
1837       }
1838     }
1839 
1840     if (Ops.LHS->getType()->isFPOrFPVectorTy())
1841       return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
1842 
1843     return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
1844   }
1845 
1846   // Must have binary (not unary) expr here.  Unary pointer decrement doesn't
1847   // use this path.
1848   const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
1849 
  if (Ops.Ty->isPointerType() &&
      Ops.Ty->getAs<PointerType>()->getPointeeType()->isVariableArrayType()) {
    // The amount of the addition needs to account for the VLA size.
    CGF.ErrorUnsupported(BinOp, "VLA pointer addition");
  }
1855 
1856   Value *Ptr, *Idx;
1857   Expr *IdxExp;
1858   const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>();
1859   const ObjCObjectPointerType *OPT =
1860     BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>();
1861   if (PT || OPT) {
1862     Ptr = Ops.LHS;
1863     Idx = Ops.RHS;
1864     IdxExp = BinOp->getRHS();
1865   } else {  // int + pointer
1866     PT = BinOp->getRHS()->getType()->getAs<PointerType>();
1867     OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>();
1868     assert((PT || OPT) && "Invalid add expr");
1869     Ptr = Ops.RHS;
1870     Idx = Ops.LHS;
1871     IdxExp = BinOp->getLHS();
1872   }
1873 
1874   unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
1875   if (Width < CGF.PointerWidthInBits) {
1876     // Zero or sign extend the pointer value based on whether the index is
1877     // signed or not.
1878     const llvm::Type *IdxType = CGF.IntPtrTy;
1879     if (IdxExp->getType()->isSignedIntegerType())
1880       Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
1881     else
1882       Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
1883   }
  const QualType ElementType =
    PT ? PT->getPointeeType() : OPT->getPointeeType();
1885   // Handle interface types, which are not represented with a concrete type.
1886   if (const ObjCObjectType *OIT = ElementType->getAs<ObjCObjectType>()) {
1887     llvm::Value *InterfaceSize =
1888       llvm::ConstantInt::get(Idx->getType(),
1889           CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
1890     Idx = Builder.CreateMul(Idx, InterfaceSize);
1891     const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
1892     Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
1893     Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
1894     return Builder.CreateBitCast(Res, Ptr->getType());
1895   }
1896 
1897   // Explicitly handle GNU void* and function pointer arithmetic extensions. The
1898   // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future-proof.
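  // e.g. under this extension 'void *p; p + 4' simply advances p by 4 bytes.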
1900   if (ElementType->isVoidType() || ElementType->isFunctionType()) {
1901     const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
1902     Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
1903     Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
1904     return Builder.CreateBitCast(Res, Ptr->getType());
1905   }
1906 
1907   if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
1908     return Builder.CreateGEP(Ptr, Idx, "add.ptr");
1909   return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
1910 }
1911 
1912 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
1913   if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
1914     if (Ops.Ty->hasSignedIntegerRepresentation()) {
1915       switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
1916       case LangOptions::SOB_Undefined:
1917         return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
1918       case LangOptions::SOB_Defined:
1919         return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
1920       case LangOptions::SOB_Trapping:
1921         return EmitOverflowCheckedBinOp(Ops);
1922       }
1923     }
1924 
1925     if (Ops.LHS->getType()->isFPOrFPVectorTy())
1926       return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
1927 
1928     return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
1929   }
1930 
1931   // Must have binary (not unary) expr here.  Unary pointer increment doesn't
1932   // use this path.
1933   const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
1934 
  if (BinOp->getLHS()->getType()->isPointerType() &&
      BinOp->getLHS()->getType()->getAs<PointerType>()
        ->getPointeeType()->isVariableArrayType()) {
    // For ptr-int, the amount of the subtraction needs to account for the
    // VLA size; for ptr-ptr, the amount of the division does.
    CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction");
  }
1943 
1944   const QualType LHSType = BinOp->getLHS()->getType();
1945   const QualType LHSElementType = LHSType->getPointeeType();
1946   if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
1947     // pointer - int
1948     Value *Idx = Ops.RHS;
1949     unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
1950     if (Width < CGF.PointerWidthInBits) {
1951       // Zero or sign extend the pointer value based on whether the index is
1952       // signed or not.
1953       const llvm::Type *IdxType = CGF.IntPtrTy;
1954       if (BinOp->getRHS()->getType()->isSignedIntegerType())
1955         Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
1956       else
1957         Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
1958     }
1959     Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
1960 
1961     // Handle interface types, which are not represented with a concrete type.
1962     if (const ObjCObjectType *OIT = LHSElementType->getAs<ObjCObjectType>()) {
1963       llvm::Value *InterfaceSize =
1964         llvm::ConstantInt::get(Idx->getType(),
1965                                CGF.getContext().
1966                                  getTypeSizeInChars(OIT).getQuantity());
1967       Idx = Builder.CreateMul(Idx, InterfaceSize);
1968       const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
1969       Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
1970       Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
1971       return Builder.CreateBitCast(Res, Ops.LHS->getType());
1972     }
1973 
1974     // Explicitly handle GNU void* and function pointer arithmetic
1975     // extensions. The GNU void* casts amount to no-ops since our void* type is
    // i8*, but this is future-proof.
1977     if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
1978       const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
1979       Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
1980       Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
1981       return Builder.CreateBitCast(Res, Ops.LHS->getType());
1982     }
1983 
1984     if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
1985       return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
1986     return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
1987   }
1988 
1989   // pointer - pointer
1990   Value *LHS = Ops.LHS;
1991   Value *RHS = Ops.RHS;
1992 
1993   CharUnits ElementSize;
1994 
1995   // Handle GCC extension for pointer arithmetic on void* and function pointer
1996   // types.
1997   if (LHSElementType->isVoidType() || LHSElementType->isFunctionType())
1998     ElementSize = CharUnits::One();
1999   else
2000     ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
2001 
2002   const llvm::Type *ResultType = ConvertType(Ops.Ty);
2003   LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
2004   RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2005   Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
2006 
  // Optimize out the division for element size of 1.
2008   if (ElementSize.isOne())
2009     return BytesBetween;
2010 
2011   // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
2012   // pointer difference in C is only defined in the case where both operands
2013   // are pointing to elements of an array.
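  // e.g. for two 'int *' operands on a target where sizeof(int) == 4, this is
  // (ptrtoint(LHS) - ptrtoint(RHS)) divided exactly by 4.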
2014   Value *BytesPerElt =
2015       llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
2016   return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
2017 }
2018 
2019 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
2020   // LLVM requires the LHS and RHS to be the same type: promote or truncate the
2021   // RHS to the same size as the LHS.
2022   Value *RHS = Ops.RHS;
2023   if (Ops.LHS->getType() != RHS->getType())
2024     RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
2025 
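  // If we are catching undefined shift behavior, branch to the trap block
  // when the shift amount is not less than the LHS bit-width (e.g. shifting
  // an i32 by 32 or more).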
2026   if (CGF.CatchUndefined
2027       && isa<llvm::IntegerType>(Ops.LHS->getType())) {
2028     unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
2029     llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
2030     CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
2031                                  llvm::ConstantInt::get(RHS->getType(), Width)),
2032                              Cont, CGF.getTrapBB());
2033     CGF.EmitBlock(Cont);
2034   }
2035 
2036   return Builder.CreateShl(Ops.LHS, RHS, "shl");
2037 }
2038 
2039 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
2040   // LLVM requires the LHS and RHS to be the same type: promote or truncate the
2041   // RHS to the same size as the LHS.
2042   Value *RHS = Ops.RHS;
2043   if (Ops.LHS->getType() != RHS->getType())
2044     RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
2045 
2046   if (CGF.CatchUndefined
2047       && isa<llvm::IntegerType>(Ops.LHS->getType())) {
2048     unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
2049     llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
2050     CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
2051                                  llvm::ConstantInt::get(RHS->getType(), Width)),
2052                              Cont, CGF.getTrapBB());
2053     CGF.EmitBlock(Cont);
2054   }
2055 
2056   if (Ops.Ty->hasUnsignedIntegerRepresentation())
2057     return Builder.CreateLShr(Ops.LHS, RHS, "shr");
2058   return Builder.CreateAShr(Ops.LHS, RHS, "shr");
2059 }
2060 
2061 enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding AltiVec comparison intrinsic for the given vector
// element type.
2063 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
2064                                         BuiltinType::Kind ElemKind) {
2065   switch (ElemKind) {
2066   default: assert(0 && "unexpected element type");
2067   case BuiltinType::Char_U:
2068   case BuiltinType::UChar:
2069     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
2070                             llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
2071     break;
2072   case BuiltinType::Char_S:
2073   case BuiltinType::SChar:
2074     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
2075                             llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
2076     break;
2077   case BuiltinType::UShort:
2078     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
2079                             llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
2080     break;
2081   case BuiltinType::Short:
2082     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
2083                             llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
2084     break;
2085   case BuiltinType::UInt:
2086   case BuiltinType::ULong:
2087     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
2088                             llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
2089     break;
2090   case BuiltinType::Int:
2091   case BuiltinType::Long:
2092     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
2093                             llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
2094     break;
2095   case BuiltinType::Float:
2096     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
2097                             llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
2098     break;
2099   }
2100   return llvm::Intrinsic::not_intrinsic;
2101 }
2102 
2103 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
2104                                       unsigned SICmpOpc, unsigned FCmpOpc) {
2105   TestAndClearIgnoreResultAssign();
2106   Value *Result;
2107   QualType LHSTy = E->getLHS()->getType();
2108   if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
2109     assert(E->getOpcode() == BO_EQ ||
2110            E->getOpcode() == BO_NE);
2111     Value *LHS = CGF.EmitScalarExpr(E->getLHS());
2112     Value *RHS = CGF.EmitScalarExpr(E->getRHS());
2113     Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
2114                    CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
2115   } else if (!LHSTy->isAnyComplexType()) {
2116     Value *LHS = Visit(E->getLHS());
2117     Value *RHS = Visit(E->getRHS());
2118 
    // With AltiVec, a comparison of vector operands yields a numeric
    // (non-vector) type, so we use predicate intrinsics that compare whole
    // vectors and give 0 or 1 as the result.
2121     if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
2122       // constants for mapping CR6 register bits to predicate result
2123       enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
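      // The '*_p' (predicate) forms of the AltiVec comparison intrinsics take
      // one of these selectors as their first operand and return an i32 0/1
      // derived from the CR6 field set by the vector comparison.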
2124 
2125       llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
2126 
      // In several cases the vector argument order must be reversed.
2128       Value *FirstVecArg = LHS,
2129             *SecondVecArg = RHS;
2130 
2131       QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
2132       const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
2133       BuiltinType::Kind ElementKind = BTy->getKind();
2134 
2135       switch(E->getOpcode()) {
      default: assert(0 && "not a comparison operation");
2137       case BO_EQ:
2138         CR6 = CR6_LT;
2139         ID = GetIntrinsic(VCMPEQ, ElementKind);
2140         break;
2141       case BO_NE:
2142         CR6 = CR6_EQ;
2143         ID = GetIntrinsic(VCMPEQ, ElementKind);
2144         break;
2145       case BO_LT:
2146         CR6 = CR6_LT;
2147         ID = GetIntrinsic(VCMPGT, ElementKind);
2148         std::swap(FirstVecArg, SecondVecArg);
2149         break;
2150       case BO_GT:
2151         CR6 = CR6_LT;
2152         ID = GetIntrinsic(VCMPGT, ElementKind);
2153         break;
2154       case BO_LE:
2155         if (ElementKind == BuiltinType::Float) {
2156           CR6 = CR6_LT;
2157           ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
2158           std::swap(FirstVecArg, SecondVecArg);
2159         }
2160         else {
2161           CR6 = CR6_EQ;
2162           ID = GetIntrinsic(VCMPGT, ElementKind);
2163         }
2164         break;
2165       case BO_GE:
2166         if (ElementKind == BuiltinType::Float) {
2167           CR6 = CR6_LT;
2168           ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
2169         }
2170         else {
2171           CR6 = CR6_EQ;
2172           ID = GetIntrinsic(VCMPGT, ElementKind);
2173           std::swap(FirstVecArg, SecondVecArg);
2174         }
2175         break;
2176       }
2177 
2178       Value *CR6Param = llvm::ConstantInt::get(CGF.Int32Ty, CR6);
2179       llvm::Function *F = CGF.CGM.getIntrinsic(ID);
2180       Result = Builder.CreateCall3(F, CR6Param, FirstVecArg, SecondVecArg, "");
2181       return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
2182     }
2183 
2184     if (LHS->getType()->isFPOrFPVectorTy()) {
2185       Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
2186                                   LHS, RHS, "cmp");
2187     } else if (LHSTy->hasSignedIntegerRepresentation()) {
2188       Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
2189                                   LHS, RHS, "cmp");
2190     } else {
2191       // Unsigned integers and pointers.
2192       Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
2193                                   LHS, RHS, "cmp");
2194     }
2195 
2196     // If this is a vector comparison, sign extend the result to the appropriate
2197     // vector integer type and return it (don't convert to bool).
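    // e.g. an icmp on two <4 x i32> operands yields <4 x i1>, which is then
    // sign-extended to a <4 x i32> of 0 and -1 values.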
2198     if (LHSTy->isVectorType())
2199       return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2200 
2201   } else {
2202     // Complex Comparison: can only be an equality comparison.
2203     CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
2204     CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
2205 
2206     QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
2207 
2208     Value *ResultR, *ResultI;
2209     if (CETy->isRealFloatingType()) {
2210       ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
2211                                    LHS.first, RHS.first, "cmp.r");
2212       ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
2213                                    LHS.second, RHS.second, "cmp.i");
2214     } else {
2215       // Complex comparisons can only be equality comparisons.  As such, signed
2216       // and unsigned opcodes are the same.
2217       ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
2218                                    LHS.first, RHS.first, "cmp.r");
2219       ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
2220                                    LHS.second, RHS.second, "cmp.i");
2221     }
2222 
2223     if (E->getOpcode() == BO_EQ) {
2224       Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
2225     } else {
2226       assert(E->getOpcode() == BO_NE &&
2227              "Complex comparison other than == or != ?");
2228       Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
2229     }
2230   }
2231 
2232   return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
2233 }
2234 
2235 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
2236   bool Ignore = TestAndClearIgnoreResultAssign();
2237 
2238   // __block variables need to have the rhs evaluated first, plus this should
2239   // improve codegen just a little.
2240   Value *RHS = Visit(E->getRHS());
2241   LValue LHS = EmitCheckedLValue(E->getLHS());
2242 
2243   // Store the value into the LHS.  Bit-fields are handled specially
2244   // because the result is altered by the store, i.e., [C99 6.5.16p1]
2245   // 'An assignment expression has the value of the left operand after
2246   // the assignment...'.
2247   if (LHS.isBitField())
2248     CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
2249                                        &RHS);
2250   else
2251     CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
2252 
2253   // If the result is clearly ignored, return now.
2254   if (Ignore)
2255     return 0;
2256 
2257   // The result of an assignment in C is the assigned r-value.
2258   if (!CGF.getContext().getLangOptions().CPlusPlus)
2259     return RHS;
2260 
2261   // Objective-C property assignment never reloads the value following a store.
2262   if (LHS.isPropertyRef())
2263     return RHS;
2264 
2265   // If the lvalue is non-volatile, return the computed value of the assignment.
2266   if (!LHS.isVolatileQualified())
2267     return RHS;
2268 
2269   // Otherwise, reload the value.
2270   return EmitLoadOfLValue(LHS, E->getType());
2271 }
2272 
2273 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
2274   const llvm::Type *ResTy = ConvertType(E->getType());
2275 
2276   // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
2277   // If we have 1 && X, just emit X without inserting the control flow.
2278   bool LHSCondVal;
2279   if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
2280     if (LHSCondVal) { // If we have 1 && X, just emit X.
2281       Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
2282       // ZExt result to int or bool.
2283       return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
2284     }
2285 
2286     // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
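    // (It is only unsafe if the RHS contains a label that a goto elsewhere
    // might jump to.)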
2287     if (!CGF.ContainsLabel(E->getRHS()))
2288       return llvm::Constant::getNullValue(ResTy);
2289   }
2290 
2291   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
2292   llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");
2293 
2294   CodeGenFunction::ConditionalEvaluation eval(CGF);
2295 
2296   // Branch on the LHS first.  If it is false, go to the failure (cont) block.
2297   CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
2298 
  // Any edges into the ContBlock at this point come from the (indeterminate
  // number of) branches on the first condition.  All of those paths produce a
  // false value.  Start setting up the PHI node in the ContBlock for this.
2302   llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
2303                                             "", ContBlock);
2304   for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
2305        PI != PE; ++PI)
2306     PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
2307 
2308   eval.begin(CGF);
2309   CGF.EmitBlock(RHSBlock);
2310   Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
2311   eval.end(CGF);
2312 
  // Reacquire the RHS block, as there may be subblocks inserted.
2314   RHSBlock = Builder.GetInsertBlock();
2315 
2316   // Emit an unconditional branch from this block to ContBlock.  Insert an entry
2317   // into the phi node for the edge with the value of RHSCond.
2318   if (CGF.getDebugInfo())
2319     // There is no need to emit line number for unconditional branch.
2320     Builder.SetCurrentDebugLocation(llvm::DebugLoc());
2321   CGF.EmitBlock(ContBlock);
2322   PN->addIncoming(RHSCond, RHSBlock);
2323 
2324   // ZExt result to int.
2325   return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
2326 }
2327 
2328 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
2329   const llvm::Type *ResTy = ConvertType(E->getType());
2330 
2331   // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
2332   // If we have 0 || X, just emit X without inserting the control flow.
2333   bool LHSCondVal;
2334   if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
2335     if (!LHSCondVal) { // If we have 0 || X, just emit X.
2336       Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
2337       // ZExt result to int or bool.
2338       return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
2339     }
2340 
2341     // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
2342     if (!CGF.ContainsLabel(E->getRHS()))
2343       return llvm::ConstantInt::get(ResTy, 1);
2344   }
2345 
2346   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
2347   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
2348 
2349   CodeGenFunction::ConditionalEvaluation eval(CGF);
2350 
2351   // Branch on the LHS first.  If it is true, go to the success (cont) block.
2352   CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
2353 
  // Any edges into the ContBlock at this point come from the (indeterminate
  // number of) branches on the first condition.  All of those paths produce a
  // true value.  Start setting up the PHI node in the ContBlock for this.
2357   llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
2358                                             "", ContBlock);
2359   for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
2360        PI != PE; ++PI)
2361     PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
2362 
2363   eval.begin(CGF);
2364 
2365   // Emit the RHS condition as a bool value.
2366   CGF.EmitBlock(RHSBlock);
2367   Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
2368 
2369   eval.end(CGF);
2370 
  // Reacquire the RHS block, as there may be subblocks inserted.
2372   RHSBlock = Builder.GetInsertBlock();
2373 
2374   // Emit an unconditional branch from this block to ContBlock.  Insert an entry
2375   // into the phi node for the edge with the value of RHSCond.
2376   CGF.EmitBlock(ContBlock);
2377   PN->addIncoming(RHSCond, RHSBlock);
2378 
2379   // ZExt result to int.
2380   return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
2381 }
2382 
2383 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
2384   CGF.EmitIgnoredExpr(E->getLHS());
2385   CGF.EnsureInsertPoint();
2386   return Visit(E->getRHS());
2387 }
2388 
2389 //===----------------------------------------------------------------------===//
2390 //                             Other Operators
2391 //===----------------------------------------------------------------------===//
2392 
2393 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
2394 /// expression is cheap enough and side-effect-free enough to evaluate
2395 /// unconditionally instead of conditionally.  This is used to convert control
2396 /// flow into selects in some cases.
2397 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
2398                                                    CodeGenFunction &CGF) {
2399   if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
2400     return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr(), CGF);
2401 
2402   // TODO: Allow anything we can constant fold to an integer or fp constant.
2403   if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
2404       isa<FloatingLiteral>(E))
2405     return true;
2406 
2407   // Non-volatile automatic variables too, to get "cond ? X : Y" where
2408   // X and Y are local variables.
2409   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
2410     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
2411       if (VD->hasLocalStorage() && !(CGF.getContext()
2412                                      .getCanonicalType(VD->getType())
2413                                      .isVolatileQualified()))
2414         return true;
2415 
2416   return false;
2417 }
2418 
2419 
2420 Value *ScalarExprEmitter::
2421 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
2422   TestAndClearIgnoreResultAssign();
2423 
2424   // Bind the common expression if necessary.
2425   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
2426 
2427   Expr *condExpr = E->getCond();
2428   Expr *lhsExpr = E->getTrueExpr();
2429   Expr *rhsExpr = E->getFalseExpr();
2430 
2431   // If the condition constant folds and can be elided, try to avoid emitting
2432   // the condition and the dead arm.
2433   bool CondExprBool;
2434   if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
2435     Expr *live = lhsExpr, *dead = rhsExpr;
2436     if (!CondExprBool) std::swap(live, dead);
2437 
    // If the dead side doesn't have labels we need, and if the live side isn't
    // the GNU missing ?: extension (which we could handle, but don't bother
    // to), just emit the live part.
2441     if (!CGF.ContainsLabel(dead))
2442       return Visit(live);
2443   }
2444 
  // OpenCL: If the condition is a vector, we can treat this conditional like
  // the select function.
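  // Each element of the result is taken from lhsExpr or rhsExpr depending on
  // the sign bit (MSB) of the corresponding condition element, mirroring
  // OpenCL's select() builtin.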
2447   if (CGF.getContext().getLangOptions().OpenCL
2448       && condExpr->getType()->isVectorType()) {
2449     llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
2450     llvm::Value *LHS = Visit(lhsExpr);
2451     llvm::Value *RHS = Visit(rhsExpr);
2452 
2453     const llvm::Type *condType = ConvertType(condExpr->getType());
2454     const llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
2455 
2456     unsigned numElem = vecTy->getNumElements();
2457     const llvm::Type *elemType = vecTy->getElementType();
2458 
2459     std::vector<llvm::Constant*> Zvals;
2460     for (unsigned i = 0; i < numElem; ++i)
2461       Zvals.push_back(llvm::ConstantInt::get(elemType,0));
2462 
2463     llvm::Value *zeroVec = llvm::ConstantVector::get(Zvals);
2464     llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
2465     llvm::Value *tmp = Builder.CreateSExt(TestMSB,
2466                                           llvm::VectorType::get(elemType,
2467                                                                 numElem),
2468                                           "sext");
2469     llvm::Value *tmp2 = Builder.CreateNot(tmp);
2470 
2471     // Cast float to int to perform ANDs if necessary.
2472     llvm::Value *RHSTmp = RHS;
2473     llvm::Value *LHSTmp = LHS;
2474     bool wasCast = false;
2475     const llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
2476     if (rhsVTy->getElementType()->isFloatTy()) {
2477       RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
2478       LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
2479       wasCast = true;
2480     }
2481 
2482     llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
2483     llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
2484     llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
2485     if (wasCast)
2486       tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
2487 
2488     return tmp5;
2489   }
2490 
2491   // If this is a really simple expression (like x ? 4 : 5), emit this as a
2492   // select instead of as control flow.  We can only do this if it is cheap and
2493   // safe to evaluate the LHS and RHS unconditionally.
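  // e.g. 'x ? 4 : 5' becomes roughly 'select i1 %tobool, i32 4, i32 5' with
  // no branches.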
2494   if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
2495       isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
2496     llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
2497     llvm::Value *LHS = Visit(lhsExpr);
2498     llvm::Value *RHS = Visit(rhsExpr);
2499     return Builder.CreateSelect(CondV, LHS, RHS, "cond");
2500   }
2501 
2502   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
2503   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
2504   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
2505 
2506   CodeGenFunction::ConditionalEvaluation eval(CGF);
2507   CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock);
2508 
2509   CGF.EmitBlock(LHSBlock);
2510   eval.begin(CGF);
2511   Value *LHS = Visit(lhsExpr);
2512   eval.end(CGF);
2513 
2514   LHSBlock = Builder.GetInsertBlock();
2515   Builder.CreateBr(ContBlock);
2516 
2517   CGF.EmitBlock(RHSBlock);
2518   eval.begin(CGF);
2519   Value *RHS = Visit(rhsExpr);
2520   eval.end(CGF);
2521 
2522   RHSBlock = Builder.GetInsertBlock();
2523   CGF.EmitBlock(ContBlock);
2524 
2525   // If the LHS or RHS is a throw expression, it will be legitimately null.
2526   if (!LHS)
2527     return RHS;
2528   if (!RHS)
2529     return LHS;
2530 
  // Create a PHI node for the result.
2532   llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
2533   PN->addIncoming(LHS, LHSBlock);
2534   PN->addIncoming(RHS, RHSBlock);
2535   return PN;
2536 }
2537 
2538 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
2539   return Visit(E->getChosenSubExpr(CGF.getContext()));
2540 }
2541 
2542 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
2543   llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
2544   llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
2545 
2546   // If EmitVAArg fails, we fall back to the LLVM instruction.
2547   if (!ArgPtr)
2548     return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
2549 
2550   // FIXME Volatility.
2551   return Builder.CreateLoad(ArgPtr);
2552 }
2553 
2554 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
2555   return CGF.EmitBlockLiteral(block);
2556 }
2557 
2558 //===----------------------------------------------------------------------===//
2559 //                         Entry Point into this File
2560 //===----------------------------------------------------------------------===//
2561 
/// EmitScalarExpr - Emit the computation of the specified expression of scalar
/// type and return the resulting value.  If IgnoreResultAssign is set, the
/// caller will not use the result of a top-level assignment expression.
2564 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
2565   assert(E && !hasAggregateLLVMType(E->getType()) &&
2566          "Invalid scalar expression to emit");
2567 
2568   if (isa<CXXDefaultArgExpr>(E))
2569     disableDebugInfo();
2570   Value *V = ScalarExprEmitter(*this, IgnoreResultAssign)
2571     .Visit(const_cast<Expr*>(E));
2572   if (isa<CXXDefaultArgExpr>(E))
2573     enableDebugInfo();
2574   return V;
2575 }
2576 
2577 /// EmitScalarConversion - Emit a conversion from the specified type to the
2578 /// specified destination type, both of which are LLVM scalar types.
2579 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
2580                                              QualType DstTy) {
2581   assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
2582          "Invalid scalar expression to emit");
2583   return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
2584 }
2585 
2586 /// EmitComplexToScalarConversion - Emit a conversion from the specified complex
2587 /// type to the specified destination type, where the destination type is an
2588 /// LLVM scalar type.
2589 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
2590                                                       QualType SrcTy,
2591                                                       QualType DstTy) {
2592   assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
2593          "Invalid complex -> scalar conversion");
2594   return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
2595                                                                 DstTy);
2596 }
2597 
2598 
2599 llvm::Value *CodeGenFunction::
2600 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2601                         bool isInc, bool isPre) {
2602   return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
2603 }
2604 
2605 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
2606   llvm::Value *V;
2607   // object->isa or (*object).isa
2608   // Generate code as for: *(Class*)object
2609   // build Class* type
2610   const llvm::Type *ClassPtrTy = ConvertType(E->getType());
2611 
2612   Expr *BaseExpr = E->getBase();
2613   if (BaseExpr->isRValue()) {
2614     V = CreateTempAlloca(ClassPtrTy, "resval");
2615     llvm::Value *Src = EmitScalarExpr(BaseExpr);
2616     Builder.CreateStore(Src, V);
2617     V = ScalarExprEmitter(*this).EmitLoadOfLValue(
2618       MakeAddrLValue(V, E->getType()), E->getType());
2619   } else {
2620     if (E->isArrow())
2621       V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
2622     else
2623       V = EmitLValue(BaseExpr).getAddress();
2624   }
2625 
2626   // build Class* type
2627   ClassPtrTy = ClassPtrTy->getPointerTo();
2628   V = Builder.CreateBitCast(V, ClassPtrTy);
2629   return MakeAddrLValue(V, E->getType());
2630 }
2631 
2632 
2633 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
2634                                             const CompoundAssignOperator *E) {
2635   ScalarExprEmitter Scalar(*this);
2636   Value *Result = 0;
2637   switch (E->getOpcode()) {
2638 #define COMPOUND_OP(Op)                                                       \
2639     case BO_##Op##Assign:                                                     \
2640       return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
2641                                              Result)
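  // e.g. COMPOUND_OP(Add) expands to:
  //   case BO_AddAssign:
  //     return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::EmitAdd,
  //                                            Result);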
2642   COMPOUND_OP(Mul);
2643   COMPOUND_OP(Div);
2644   COMPOUND_OP(Rem);
2645   COMPOUND_OP(Add);
2646   COMPOUND_OP(Sub);
2647   COMPOUND_OP(Shl);
2648   COMPOUND_OP(Shr);
2649   COMPOUND_OP(And);
2650   COMPOUND_OP(Xor);
2651   COMPOUND_OP(Or);
2652 #undef COMPOUND_OP
2653 
2654   case BO_PtrMemD:
2655   case BO_PtrMemI:
2656   case BO_Mul:
2657   case BO_Div:
2658   case BO_Rem:
2659   case BO_Add:
2660   case BO_Sub:
2661   case BO_Shl:
2662   case BO_Shr:
2663   case BO_LT:
2664   case BO_GT:
2665   case BO_LE:
2666   case BO_GE:
2667   case BO_EQ:
2668   case BO_NE:
2669   case BO_And:
2670   case BO_Xor:
2671   case BO_Or:
2672   case BO_LAnd:
2673   case BO_LOr:
2674   case BO_Assign:
2675   case BO_Comma:
    assert(false && "Not a valid compound assignment operator");
2677     break;
2678   }
2679 
2680   llvm_unreachable("Unhandled compound assignment operator");
2681 }
2682