//===--- CGStmt.cpp - Emit LLVM Code from Statements ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CallSite.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getLocStart();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S) {
  assert(S && "Null statement?");

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, since they may
      // be in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
  case Stmt::OMPParallelDirectiveClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));       break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S)); break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S));       break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S));     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
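  // Note: GCCAsmStmt and MSAsmStmt share the AsmStmt base class, so both
  // inline asm dialects funnel into the same EmitAsmStmt path.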
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));        break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
                    "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
                  "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
  case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
  case Stmt::AttributedStmtClass:
                            EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
  case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
  case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns
/// it (for use by the statement expression extension).
llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S,
                                               bool GetLast,
                                               AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                             "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

llvm::Value*
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  for (CompoundStmt::const_body_iterator I = S.body_begin(),
       E = S.body_end()-GetLast; I != E; ++I)
    EmitStmt(*I);

  llvm::Value *RetAlloca = 0;
  if (GetLast) {
    // We have to special case labels here. They are statements, but when put
    // at the end of a statement expression, they yield the value of their
    // subexpression. Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
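    // For example, in the GNU statement expression ({ foo(); out: 42; }),
    // the label 'out' must be emitted before 42 is evaluated as the result.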
    const Stmt *LastStmt = S.body_back();
    while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
      EmitLabel(LS->getDecl());
      LastStmt = LS->getSubStmt();
    }

    EnsureInsertPoint();

    QualType ExprTy = cast<Expr>(LastStmt)->getType();
    if (hasAggregateEvaluationKind(ExprTy)) {
      EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
    } else {
      // We can't return an RValue here because there might be cleanups at
      // the end of the StmtExpr. Because of that, we have to emit the result
      // here into a temporary alloca.
      RetAlloca = CreateMemTemp(ExprTy);
      EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
                       /*IsInit*/false);
    }

  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB, BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::BasicBlock::use_iterator
         i = block->use_begin(), e = block->use_end(); i != e; ++i) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(*i)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
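  // It will be inserted into the function later, when the label statement
  // itself is emitted (see EmitLabel below).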
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest;
  // add an entry for this branch.
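  // (GetIndirectGotoBlock creates that PHI when it creates the block, so the
  // cast below is safe.)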
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (!ContainsLabel(Skipped)) {
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    // There is no need to emit a line number for an unconditional branch.
    if (getDebugInfo())
      Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    EmitBlock(ElseBlock);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    // There is no need to emit a line number for an unconditional branch.
    if (getDebugInfo())
      Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    EmitBranch(ContBlock);
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.
  // C99 6.8.5.1: The evaluation of the controlling expression takes place
  // before each execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");

    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the condition branch, the LoopHeader is typically
  // just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
  EmitBlock(LoopBody);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the condition in the "do..while" condition block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch)
    Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the condition branch, the do.cond block is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  RunCleanupsScope ForScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  // Create a cleanup scope for the condition variable cleanups.
  RunCleanupsScope ConditionScope(*this);

  llvm::Value *BoolCondVal = 0;
  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (S.getConditionVariable()) {
      EmitAutoVarDecl(*S.getConditionVariable());
    }

    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block. Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  RunCleanupsScope ForScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Evaluate the first pieces before the loop.
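  // These are the implicit __range variable and the __begin/__end iterator
  // variables from the standard range-for desugaring.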
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
  } else {
    EmitStoreOfComplex(RV.getComplexVal(),
                       MakeNaturalAlignAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope. This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
  RunCleanupsScope cleanupScope(*this);
  if (const ExprWithCleanups *cleanups =
        dyn_cast_or_null<ExprWithCleanups>(RV)) {
    enterFullExpression(cleanups);
    RV = cleanups->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
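  // The cases below: NRVO (nothing to store), no return slot (evaluate for
  // side effects only), no return value, a reference return, and finally a
  // store dispatched on the expression's evaluation kind.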
  if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (RV == 0) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV,
                     MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
      EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
                                            Qualifiers(),
                                            AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
      break;
    }
    }
  }

  ++NumReturnExprs;
  if (RV == 0 || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
       I != E; ++I)
    EmitDecl(**I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  JumpDest Block = BreakContinueStack.back().BreakBlock;
  EmitBranchThroughCleanup(Block);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  JumpDest Block = BreakContinueStack.back().ContinueBlock;
  EmitBranchThroughCleanup(Block);
}

/// EmitCaseStmtRange - If the case statement range is not too big, add
/// multiple cases to the switch instruction, one for each value within
/// the range. If the range is too big, emit an "if" condition check instead.
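/// For example, "case 1 ... 3:" becomes three individual switch cases, while
/// "case 1 ... 1000:" becomes a subtract-and-unsigned-compare test chained in
/// front of the switch's default destination.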
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  EmitBlock(createBasicBlock("sw.bb"));
  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      LHS++;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
  Builder.CreateCondBr(Cond, CaseDest, FalseDest);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', and if there was no fallthrough,
  // try to not emit an empty block.
  if ((CGM.getCodeGenOpts().OptimizationLevel > 0) &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  EmitBlock(createBasicBlock("sw.bb"));
  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case, which is IR-intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == 0) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");
  EmitBlock(DefaultBlock);
  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit. Basically,
/// on a switch (5) we want to find these statements:
///   case 5:
///     printf(...);    <--
///     ++i;            <--
///     break;
///
/// and add them to the ResultStmts vector. If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough. If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels. If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                     SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (S == 0)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the SwitchCase (case 4: or default:) that we're looking for,
  // then we're in business. Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (Case == 0 && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a switch statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    if (Case) {
      // Keep track of whether we see a skipped declaration. The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= isa<DeclStmt>(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means that either 1) the statement doesn't
          // have the case and is skippable, or 2) it does contain the case
          // value and also contains the break to exit the switch. In the
          // latter case, we just verify the rest of the statements are
          // elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements. Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = 0;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    for (; I != E; ++I) {
      switch (CollectStatementsForCase(*I, 0, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmts; keep adding the following statements.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion. We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    return Case ? CSFC_Success : CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc. If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement. Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great. Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C) {
  // First step, find the switch case that is being branched to. We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = 0;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or a case. Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (Case == 0) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc. If it is safe, return successfully with an empty ResultStmts list.
    if (DefaultCase == 0)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it. This can fail for a variety of reasons. Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
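  // A Duff's-device-style construct like the above hides the case inside a
  // statement kind the walk doesn't descend into, so the FoundCase flag is
  // what catches it.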
  bool FoundCase = false;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext())) {
      RunCleanupsScope ExecutedScope(*this);

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = 0;

      // Okay, we can dead code eliminate everything except this case. Emit the
      // specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement. We also need to create a default block now so that
  // explicit case range tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to SwitchExit. If BreakContinueStack is
  // non-empty then reuse the last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);

  SwitchInsn = SavedSwitchInsn;
  CaseRangeBlock = SavedCRBlock;
}

static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in multi-alt constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint,
                                               &(*OutCons)[0],
                                               OutCons->size(), Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}

/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
/// as using a particular register add that as a constraint that will be used
/// in this asm stmt.
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) &&
      !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
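  // The "{regname}" form below is LLVM's explicit register constraint, which
  // pins the operand to that physical register.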
  Register = Target.getNormalizedGCCRegisterName(Register);
  return "{" + Register.str() + "}";
}

llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
                                    LValue InputValue, QualType InputType,
                                    std::string &ConstraintStr,
                                    SourceLocation Loc) {
  llvm::Value *Arg;
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
      Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
    } else {
      llvm::Type *Ty = ConvertType(InputType);
      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
        Ty = llvm::PointerType::getUnqual(Ty);

        Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
                                                       Ty));
      } else {
        Arg = InputValue.getAddress();
        ConstraintStr += '*';
      }
    }
  } else {
    Arg = InputValue.getAddress();
    ConstraintStr += '*';
  }

  return Arg;
}

llvm::Value* CodeGenFunction::EmitAsmInput(
                                         const TargetInfo::ConstraintInfo &Info,
                                           const Expr *InputExpr,
                                           std::string &ConstraintStr) {
  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return EmitScalarExpr(InputExpr);

  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}

/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
/// asm call instruction. The !srcloc MDNode contains a list of constant
/// integers which are the source locations of the start of each line in the
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
                                      CodeGenFunction &CGF) {
  SmallVector<llvm::Value *, 8> Locs;
  // Add the location of the first line to the MDNode.
  Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
                                        Str->getLocStart().getRawEncoding()));
  StringRef StrVal = Str->getString();
  if (!StrVal.empty()) {
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();

    // Add the location of the start of each subsequent line of the asm to the
    // MDNode.
    for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
      if (StrVal[i] != '\n') continue;
      SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
                                                      CGF.getTarget());
      Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
                                            LineLoc.getRawEncoding()));
    }
  }

  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}

void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
  // Assemble the final asm string.
  std::string AsmString = S.generateAsmString(getContext());

  // Get all the output and input constraints together.
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getOutputName(i);
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
    assert(IsValid && "Failed to parse output constraint");
    OutputConstraintInfos.push_back(Info);
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getInputName(i);
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
    bool IsValid =
      getTarget().validateInputConstraint(OutputConstraintInfos.data(),
                                          S.getNumOutputs(), Info);
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
    InputConstraintInfos.push_back(Info);
  }

  std::string Constraints;

  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Value*> Args;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value*> InOutArgs;
  std::vector<llvm::Type*> InOutArgTypes;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget());

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S);

    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value. If this is a memory result, return the value by-reference.
    if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(OutExpr->getType());
      ResultRegDests.push_back(Dest);
      ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
      ResultTruncRegTypes.push_back(ResultRegTypes.back());

      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
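      // For example, an i32 output tied to an i64 input is widened so the asm
      // returns i64; ResultTruncRegTypes records the original type so the
      // result can be truncated back when stored to the output.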
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                 ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }
    } else {
      ArgTypes.push_back(Dest.getAddress()->getType());
      Args.push_back(Dest.getAddress());
      Constraints += "=*";
      Constraints += OutputConstraint;
    }

    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
                                            InOutConstraints,
                                            InputExpr->getExprLoc());

      if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                 Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      if (Info.allowsRegister())
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgs.push_back(Arg);
    }
  }

  unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    std::string InputConstraint(S.getInputConstraint(i));
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint =
      AddVariableConstraints(InputConstraint,
                             *InputExpr->IgnoreParenNoopCasts(getContext()),
                             getTarget(), CGM, S);

    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output.  The LLVM backend wants to see
    // the input and output of a matching constraint be the same size.  Note
    // that GCC does not define what the top bits are here.  We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
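        // (Illustrative sketch: an i32 input tied to an i64 output is
        // zero-extended to i64 below; a pointer input is first converted
        // with ptrtoint to the pointer-sized integer before extension.)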
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else {
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
          Arg = Builder.CreateFPExt(Arg, OutputTy);
        }
      }
    }
    if (llvm::Type* AdjTy =
          getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
                                               Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
        << InputExpr->getType() << InputConstraint;

    ArgTypes.push_back(Arg->getType());
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }

  // Append the "input" part of inout constraints last.
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    if (Clobber != "memory" && Clobber != "cc")
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);

    if (i != 0 || NumConstraints != 0)
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  // Add machine specific clobbers
  std::string MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(ResultType, ArgTypes, false);

  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
    llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
  llvm::InlineAsm *IA =
    llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
                         /* IsAlignStack */ false, AsmDialect);
  llvm::CallInst *Result = Builder.CreateCall(IA, Args);
  Result->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::NoUnwind);

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.  FIXME: Handle metadata for MS-style inline asms.
  if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
                                                   *this));

  // Extract all of the register value results from the asm.
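  // (Illustrative sketch: with two register outputs the call's result type is
  // a struct such as { i32, i32 }, and each field is pulled out below with
  // extractvalue; a single register output is used directly.)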
  std::vector<llvm::Value*> RegResults;
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }

  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
      llvm::Type *TruncTy = ResultTruncRegTypes[i];

      // Truncate the integer result to the right size; note that TruncTy can
      // be a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(Tmp,
                  llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(Tmp,
                  llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
  }
}

static LValue InitCapturedStruct(CodeGenFunction &CGF, const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = CGF.getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV = CGF.MakeNaturalAlignAddrLValue(
      CGF.CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(),
                                           E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    CGF.EmitInitializerForField(*CurField, LV, *I, ArrayRef<VarDecl *>());
  }

  return SlotLV;
}

/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined
/// function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  LValue CapStruct = InitCapturedStruct(*this, S);

  // Emit the CapturedDecl.
  CodeGenFunction CGF(CGM, true);
  CGF.CapturedStmtInfo = new CGCapturedStmtInfo(S, K);
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(CD, RD, S.getLocStart());
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getAddress());

  return F;
}

/// Creates the outlined function for a CapturedStmt.
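/// (Illustrative sketch, not from the original source: the helper receives
/// the captured-variable record as its single context parameter, roughly
///   define internal void @__captured_stmt(%struct.anon* %__context)
/// with the actual symbol taken from CapturedStmtInfo->getHelperName() and
/// the record layout from the CapturedStmt's captured RecordDecl.)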
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedDecl *CD,
                                              const RecordDecl *RD,
                                              SourceLocation Loc) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  FunctionType::ExtInfo ExtInfo;
  const CGFunctionInfo &FuncInfo =
    CGM.getTypes().arrangeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
                                              /*IsVariadic=*/false);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getBody()->getLocStart());

  // Set the context parameter in CapturedStmtInfo.
  llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()];
  assert(DeclPtr && "missing context parameter for CapturedStmt");
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue LV = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
    LValue ThisLValue = EmitLValueForField(LV, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}