//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getLocStart();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
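    // (It is compared against the block we end up in after emitting the
    // expression so that dead blocks created for noreturn calls can be
    // pruned; see the longer comment below.)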
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));         break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S));   break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S));         break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S));       break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
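  // Both GCC-style and MS-style inline assembly are lowered through the same
  // EmitAsmStmt entry point.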
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));       break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
  case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
  case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
  case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                               AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(), S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

llvm::Value*
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  for (CompoundStmt::const_body_iterator I = S.body_begin(),
       E = S.body_end()-GetLast; I != E; ++I)
    EmitStmt(*I);

  llvm::Value *RetAlloca = nullptr;
  if (GetLast) {
    // We have to special case labels here.  They are statements, but when put
    // at the end of a statement expression, they yield the value of their
    // subexpression.  Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
    const Stmt *LastStmt = S.body_back();
    while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
      EmitLabel(LS->getDecl());
      LastStmt = LS->getSubStmt();
    }

    EnsureInsertPoint();

    QualType ExprTy = cast<Expr>(LastStmt)->getType();
    if (hasAggregateEvaluationKind(ExprTy)) {
      EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
    } else {
      // We can't return an RValue here because there might be cleanups at
      // the end of the StmtExpr.  Because of that, we have to emit the result
      // here into a temporary alloca.
      RetAlloca = CreateMemTemp(ExprTy);
      EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
                       /*IsInit*/false);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB, BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());
  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
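/// If the enclosing context still has normal cleanups, the labels are also
/// handed to the parent lexical scope so they can be rescoped again when that
/// scope ends.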
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  const Stmt *SubStmt = S.getSubStmt();
  switch (SubStmt->getStmtClass()) {
  case Stmt::DoStmtClass:
    EmitDoStmt(cast<DoStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::ForStmtClass:
    EmitForStmt(cast<ForStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::WhileStmtClass:
    EmitWhileStmt(cast<WhileStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*SubStmt), S.getAttrs());
    break;
  default:
    EmitStmt(SubStmt);
  }
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (!ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
                       getProfileCount(S.getThen()));

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
                                      llvm::BranchInst *CondBr,
                                      ArrayRef<const Attr *> Attrs) {
  // Return if there are no hints.
  if (Attrs.empty())
    return;

  // Add vectorize and unroll hints to the metadata on the conditional branch.
  //
  // FIXME: Should this really start with a size of 1?
  SmallVector<llvm::Metadata *, 2> Metadata(1);
  for (const auto *Attr : Attrs) {
    const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);

    // Skip non-loop-hint attributes.
    if (!LH)
      continue;

    LoopHintAttr::OptionType Option = LH->getOption();
    LoopHintAttr::LoopHintState State = LH->getState();
    const char *MetadataName;
    switch (Option) {
    case LoopHintAttr::Vectorize:
    case LoopHintAttr::VectorizeWidth:
      MetadataName = "llvm.loop.vectorize.width";
      break;
    case LoopHintAttr::Interleave:
    case LoopHintAttr::InterleaveCount:
      MetadataName = "llvm.loop.interleave.count";
      break;
    case LoopHintAttr::Unroll:
      // With the unroll loop hint, a non-zero value indicates full unrolling.
      MetadataName = State == LoopHintAttr::Disable ? "llvm.loop.unroll.disable"
                                                    : "llvm.loop.unroll.full";
      break;
    case LoopHintAttr::UnrollCount:
      MetadataName = "llvm.loop.unroll.count";
      break;
    }

    Expr *ValueExpr = LH->getValue();
    int ValueInt = 1;
    if (ValueExpr) {
      llvm::APSInt ValueAPS =
          ValueExpr->EvaluateKnownConstInt(CGM.getContext());
      ValueInt = static_cast<int>(ValueAPS.getSExtValue());
    }

    llvm::Constant *Value;
    llvm::MDString *Name;
    switch (Option) {
    case LoopHintAttr::Vectorize:
    case LoopHintAttr::Interleave:
      if (State != LoopHintAttr::Disable) {
        // FIXME: In the future I will modify the behavior of the metadata
        // so we can enable/disable vectorization and interleaving separately.
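        // Until then, an enabled 'vectorize' or 'interleave' hint is emitted
        // as a single llvm.loop.vectorize.enable = true operand.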
        Name = llvm::MDString::get(Context, "llvm.loop.vectorize.enable");
        Value = Builder.getTrue();
        break;
      }
      // Vectorization/interleaving is disabled, set width/count to 1.
      ValueInt = 1;
      // Fallthrough.
    case LoopHintAttr::VectorizeWidth:
    case LoopHintAttr::InterleaveCount:
    case LoopHintAttr::UnrollCount:
      Name = llvm::MDString::get(Context, MetadataName);
      Value = llvm::ConstantInt::get(Int32Ty, ValueInt);
      break;
    case LoopHintAttr::Unroll:
      Name = llvm::MDString::get(Context, MetadataName);
      Value = nullptr;
      break;
    }

    SmallVector<llvm::Metadata *, 2> OpValues;
    OpValues.push_back(Name);
    if (Value)
      OpValues.push_back(llvm::ConstantAsMetadata::get(Value));

    // Set or overwrite metadata indicated by Name.
    Metadata.push_back(llvm::MDNode::get(Context, OpValues));
  }

  // FIXME: This condition is never false.  Should it be an assert?
  if (!Metadata.empty()) {
    // Add llvm.loop MDNode to CondBr.
    llvm::MDNode *LoopID = llvm::MDNode::get(Context, Metadata);
    LoopID->replaceOperandWith(0, LoopID); // First op points to itself.

    CondBr->setMetadata("llvm.loop", LoopID);
  }
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  LoopStack.push(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::BranchInst *CondBr = Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    // Attach metadata to loop body conditional branch.
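    // This is how '#pragma clang loop' hints reach the optimizer: as an
    // llvm.loop metadata node on the loop's conditional branch.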
    EmitCondBrHints(LoopBody->getContext(), CondBr, WhileAttrs);
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the condition branch, the LoopHeader is typically
  // just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  LoopStack.push(LoopBody);

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the do..while condition block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    llvm::BranchInst *CondBr = Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));

    // Attach metadata to loop body conditional branch.
    EmitCondBrHints(LoopBody->getContext(), CondBr, DoAttrs);
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the condition branch, the do.cond block is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
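  // The continue destination initially aliases the condition block; a
  // dedicated "for.inc" block replaces it below if the loop has an increment.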
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  LoopStack.push(CondBlock);

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that block will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitAutoVarDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::BranchInst *CondBr = Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    // Attach metadata to loop body conditional branch.
    EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
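  // A range-based for always has an increment, so the dedicated "for.inc"
  // continue block is created further down, after the body block.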
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::BranchInst *CondBr = Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  // Attach metadata to loop body conditional branch.
  EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.  In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
  } else {
    EmitStoreOfComplex(RV.getComplexVal(),
                       MakeNaturalAlignAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope.  This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
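  // The same scope also covers any temporaries bound by an ExprWithCleanups
  // wrapping the return value (handled just below).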
  RunCleanupsScope cleanupScope(*this);
  if (const ExprWithCleanups *cleanups =
        dyn_cast_or_null<ExprWithCleanups>(RV)) {
    enterFullExpression(cleanups);
    RV = cleanups->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  if (getLangOpts().ElideConstructors &&
      S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV,
                                MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
      EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
                                            Qualifiers(),
                                            AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
      break;
    }
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big then add
/// multiple cases to the switch instruction, one for each value within
/// the range. If the range is too big then emit an "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      LHS++;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  }
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided.  This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement.  For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().ProfileInstrGenerate &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive.  It also causes
  // deep recursion which can run into stack depth limitations.  Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().ProfileInstrGenerate) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, &S);
    }

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                            SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business.  Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a compound statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    if (Case) {
      // Keep track of whether we see a skipped declaration.  The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= isa<DeclStmt>(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means either that 1) the statement doesn't
          // have the case and is skippable, or 2) it does contain the case
          // value and also contains the break to exit the switch.  In the
          // latter case, we just verify the rest of the statements are
          // elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements.  Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    for (; I != E; ++I) {
      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmts; keep adding statements afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion.  We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    return Case ? CSFC_Success : CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc.  If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement.  Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great.  Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant.  See the comment above CollectStatementsForCase
/// for more details.
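///
/// Returns true only if a live case (or the default) was found and all of its
/// statements could be collected; ResultCase is set to that case.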
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                       SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to.  We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case.  Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it.  This can fail for a variety of reasons.  Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
      if (S.getConditionVariable())
        EmitAutoVarDecl(*S.getConditionVariable());

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case.  Emit the
      // specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);
      incrementProfileCounter(&S);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);
  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement. We also need to create a default block now so that
  // explicit case range tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  if (PGO.haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first. We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  }
  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to SwitchExit. If the BreakContinueStack is
  // non-empty, reuse the enclosing construct's continue destination so that
  // 'continue' inside the switch body still targets the surrounding loop.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);
  incrementProfileCounter(&S);

  if (SwitchWeights) {
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
           "switch weights do not match switch cases");
    // If there's only one jump destination there's no sense weighting it.
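    // (Informal note: createProfileWeights is expected to produce branch
    // weight profile metadata with the default edge's weight first, mirroring
    // the order the counts were pushed above; a size of 1 means the switch
    // instruction ended up with no case destinations besides the default, so
    // there is nothing worth annotating.)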
1622 if (SwitchWeights->size() > 1) 1623 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof, 1624 createProfileWeights(*SwitchWeights)); 1625 delete SwitchWeights; 1626 } 1627 SwitchInsn = SavedSwitchInsn; 1628 SwitchWeights = SavedSwitchWeights; 1629 CaseRangeBlock = SavedCRBlock; 1630 } 1631 1632 static std::string 1633 SimplifyConstraint(const char *Constraint, const TargetInfo &Target, 1634 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) { 1635 std::string Result; 1636 1637 while (*Constraint) { 1638 switch (*Constraint) { 1639 default: 1640 Result += Target.convertConstraint(Constraint); 1641 break; 1642 // Ignore these 1643 case '*': 1644 case '?': 1645 case '!': 1646 case '=': // Will see this and the following in mult-alt constraints. 1647 case '+': 1648 break; 1649 case '#': // Ignore the rest of the constraint alternative. 1650 while (Constraint[1] && Constraint[1] != ',') 1651 Constraint++; 1652 break; 1653 case '&': 1654 case '%': 1655 Result += *Constraint; 1656 while (Constraint[1] && Constraint[1] == *Constraint) 1657 Constraint++; 1658 break; 1659 case ',': 1660 Result += "|"; 1661 break; 1662 case 'g': 1663 Result += "imr"; 1664 break; 1665 case '[': { 1666 assert(OutCons && 1667 "Must pass output names to constraints with a symbolic name"); 1668 unsigned Index; 1669 bool result = Target.resolveSymbolicName(Constraint, 1670 &(*OutCons)[0], 1671 OutCons->size(), Index); 1672 assert(result && "Could not resolve symbolic name"); (void)result; 1673 Result += llvm::utostr(Index); 1674 break; 1675 } 1676 } 1677 1678 Constraint++; 1679 } 1680 1681 return Result; 1682 } 1683 1684 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared 1685 /// as using a particular register add that as a constraint that will be used 1686 /// in this asm stmt. 1687 static std::string 1688 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, 1689 const TargetInfo &Target, CodeGenModule &CGM, 1690 const AsmStmt &Stmt, const bool EarlyClobber) { 1691 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr); 1692 if (!AsmDeclRef) 1693 return Constraint; 1694 const ValueDecl &Value = *AsmDeclRef->getDecl(); 1695 const VarDecl *Variable = dyn_cast<VarDecl>(&Value); 1696 if (!Variable) 1697 return Constraint; 1698 if (Variable->getStorageClass() != SC_Register) 1699 return Constraint; 1700 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>(); 1701 if (!Attr) 1702 return Constraint; 1703 StringRef Register = Attr->getLabel(); 1704 assert(Target.isValidGCCRegisterName(Register)); 1705 // We're using validateOutputConstraint here because we only care if 1706 // this is a register constraint. 1707 TargetInfo::ConstraintInfo Info(Constraint, ""); 1708 if (Target.validateOutputConstraint(Info) && 1709 !Info.allowsRegister()) { 1710 CGM.ErrorUnsupported(&Stmt, "__asm__"); 1711 return Constraint; 1712 } 1713 // Canonicalize the register here before returning it. 1714 Register = Target.getNormalizedGCCRegisterName(Register); 1715 return (EarlyClobber ? 
"&{" : "{") + Register.str() + "}"; 1716 } 1717 1718 llvm::Value* 1719 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, 1720 LValue InputValue, QualType InputType, 1721 std::string &ConstraintStr, 1722 SourceLocation Loc) { 1723 llvm::Value *Arg; 1724 if (Info.allowsRegister() || !Info.allowsMemory()) { 1725 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) { 1726 Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal(); 1727 } else { 1728 llvm::Type *Ty = ConvertType(InputType); 1729 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); 1730 if (Size <= 64 && llvm::isPowerOf2_64(Size)) { 1731 Ty = llvm::IntegerType::get(getLLVMContext(), Size); 1732 Ty = llvm::PointerType::getUnqual(Ty); 1733 1734 Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(), 1735 Ty)); 1736 } else { 1737 Arg = InputValue.getAddress(); 1738 ConstraintStr += '*'; 1739 } 1740 } 1741 } else { 1742 Arg = InputValue.getAddress(); 1743 ConstraintStr += '*'; 1744 } 1745 1746 return Arg; 1747 } 1748 1749 llvm::Value* CodeGenFunction::EmitAsmInput( 1750 const TargetInfo::ConstraintInfo &Info, 1751 const Expr *InputExpr, 1752 std::string &ConstraintStr) { 1753 if (Info.allowsRegister() || !Info.allowsMemory()) 1754 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType())) 1755 return EmitScalarExpr(InputExpr); 1756 1757 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); 1758 LValue Dest = EmitLValue(InputExpr); 1759 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, 1760 InputExpr->getExprLoc()); 1761 } 1762 1763 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline 1764 /// asm call instruction. The !srcloc MDNode contains a list of constant 1765 /// integers which are the source locations of the start of each line in the 1766 /// asm. 1767 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str, 1768 CodeGenFunction &CGF) { 1769 SmallVector<llvm::Metadata *, 8> Locs; 1770 // Add the location of the first line to the MDNode. 1771 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( 1772 CGF.Int32Ty, Str->getLocStart().getRawEncoding()))); 1773 StringRef StrVal = Str->getString(); 1774 if (!StrVal.empty()) { 1775 const SourceManager &SM = CGF.CGM.getContext().getSourceManager(); 1776 const LangOptions &LangOpts = CGF.CGM.getLangOpts(); 1777 1778 // Add the location of the start of each subsequent line of the asm to the 1779 // MDNode. 1780 for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) { 1781 if (StrVal[i] != '\n') continue; 1782 SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts, 1783 CGF.getTarget()); 1784 Locs.push_back(llvm::ConstantAsMetadata::get( 1785 llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding()))); 1786 } 1787 } 1788 1789 return llvm::MDNode::get(CGF.getLLVMContext(), Locs); 1790 } 1791 1792 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { 1793 // Assemble the final asm string. 1794 std::string AsmString = S.generateAsmString(getContext()); 1795 1796 // Get all the output and input constraints together. 
1797 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos; 1798 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos; 1799 1800 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { 1801 StringRef Name; 1802 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) 1803 Name = GAS->getOutputName(i); 1804 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name); 1805 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid; 1806 assert(IsValid && "Failed to parse output constraint"); 1807 OutputConstraintInfos.push_back(Info); 1808 } 1809 1810 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { 1811 StringRef Name; 1812 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) 1813 Name = GAS->getInputName(i); 1814 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name); 1815 bool IsValid = 1816 getTarget().validateInputConstraint(OutputConstraintInfos.data(), 1817 S.getNumOutputs(), Info); 1818 assert(IsValid && "Failed to parse input constraint"); (void)IsValid; 1819 InputConstraintInfos.push_back(Info); 1820 } 1821 1822 std::string Constraints; 1823 1824 std::vector<LValue> ResultRegDests; 1825 std::vector<QualType> ResultRegQualTys; 1826 std::vector<llvm::Type *> ResultRegTypes; 1827 std::vector<llvm::Type *> ResultTruncRegTypes; 1828 std::vector<llvm::Type *> ArgTypes; 1829 std::vector<llvm::Value*> Args; 1830 1831 // Keep track of inout constraints. 1832 std::string InOutConstraints; 1833 std::vector<llvm::Value*> InOutArgs; 1834 std::vector<llvm::Type*> InOutArgTypes; 1835 1836 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { 1837 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; 1838 1839 // Simplify the output constraint. 1840 std::string OutputConstraint(S.getOutputConstraint(i)); 1841 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, 1842 getTarget()); 1843 1844 const Expr *OutExpr = S.getOutputExpr(i); 1845 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); 1846 1847 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, 1848 getTarget(), CGM, S, 1849 Info.earlyClobber()); 1850 1851 LValue Dest = EmitLValue(OutExpr); 1852 if (!Constraints.empty()) 1853 Constraints += ','; 1854 1855 // If this is a register output, then make the inline asm return it 1856 // by-value. If this is a memory result, return the value by-reference. 1857 if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) { 1858 Constraints += "=" + OutputConstraint; 1859 ResultRegQualTys.push_back(OutExpr->getType()); 1860 ResultRegDests.push_back(Dest); 1861 ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType())); 1862 ResultTruncRegTypes.push_back(ResultRegTypes.back()); 1863 1864 // If this output is tied to an input, and if the input is larger, then 1865 // we need to set the actual result type of the inline asm node to be the 1866 // same as the input type. 
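      // For example (informal, x86-flavoured):
      //   int out; long long in = ...;
      //   __asm__("..." : "=r"(out) : "0"(in));
      // The "0" input is tied to output 0 but is wider, so the asm result
      // type is widened to the input's type here, and the store back into
      // 'out' is truncated later via ResultTruncRegTypes.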
1867 if (Info.hasMatchingInput()) { 1868 unsigned InputNo; 1869 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) { 1870 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo]; 1871 if (Input.hasTiedOperand() && Input.getTiedOperand() == i) 1872 break; 1873 } 1874 assert(InputNo != S.getNumInputs() && "Didn't find matching input!"); 1875 1876 QualType InputTy = S.getInputExpr(InputNo)->getType(); 1877 QualType OutputType = OutExpr->getType(); 1878 1879 uint64_t InputSize = getContext().getTypeSize(InputTy); 1880 if (getContext().getTypeSize(OutputType) < InputSize) { 1881 // Form the asm to return the value as a larger integer or fp type. 1882 ResultRegTypes.back() = ConvertType(InputTy); 1883 } 1884 } 1885 if (llvm::Type* AdjTy = 1886 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, 1887 ResultRegTypes.back())) 1888 ResultRegTypes.back() = AdjTy; 1889 else { 1890 CGM.getDiags().Report(S.getAsmLoc(), 1891 diag::err_asm_invalid_type_in_input) 1892 << OutExpr->getType() << OutputConstraint; 1893 } 1894 } else { 1895 ArgTypes.push_back(Dest.getAddress()->getType()); 1896 Args.push_back(Dest.getAddress()); 1897 Constraints += "=*"; 1898 Constraints += OutputConstraint; 1899 } 1900 1901 if (Info.isReadWrite()) { 1902 InOutConstraints += ','; 1903 1904 const Expr *InputExpr = S.getOutputExpr(i); 1905 llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(), 1906 InOutConstraints, 1907 InputExpr->getExprLoc()); 1908 1909 if (llvm::Type* AdjTy = 1910 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, 1911 Arg->getType())) 1912 Arg = Builder.CreateBitCast(Arg, AdjTy); 1913 1914 if (Info.allowsRegister()) 1915 InOutConstraints += llvm::utostr(i); 1916 else 1917 InOutConstraints += OutputConstraint; 1918 1919 InOutArgTypes.push_back(Arg->getType()); 1920 InOutArgs.push_back(Arg); 1921 } 1922 } 1923 1924 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX) 1925 // to the return value slot. Only do this when returning in registers. 1926 if (isa<MSAsmStmt>(&S)) { 1927 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); 1928 if (RetAI.isDirect() || RetAI.isExtend()) { 1929 // Make a fake lvalue for the return value slot. 1930 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy); 1931 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs( 1932 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes, 1933 ResultRegDests, AsmString, S.getNumOutputs()); 1934 SawAsmBlock = true; 1935 } 1936 } 1937 1938 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { 1939 const Expr *InputExpr = S.getInputExpr(i); 1940 1941 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; 1942 1943 if (!Constraints.empty()) 1944 Constraints += ','; 1945 1946 // Simplify the input constraint. 1947 std::string InputConstraint(S.getInputConstraint(i)); 1948 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), 1949 &OutputConstraintInfos); 1950 1951 InputConstraint = AddVariableConstraints( 1952 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), 1953 getTarget(), CGM, S, false /* No EarlyClobber */); 1954 1955 llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints); 1956 1957 // If this input argument is tied to a larger output result, extend the 1958 // input to be the same size as the output. The LLVM backend wants to see 1959 // the input and output of a matching constraint be the same size. Note 1960 // that GCC does not define what the top bits are here. 
We use zext because 1961 // that is usually cheaper, but LLVM IR should really get an anyext someday. 1962 if (Info.hasTiedOperand()) { 1963 unsigned Output = Info.getTiedOperand(); 1964 QualType OutputType = S.getOutputExpr(Output)->getType(); 1965 QualType InputTy = InputExpr->getType(); 1966 1967 if (getContext().getTypeSize(OutputType) > 1968 getContext().getTypeSize(InputTy)) { 1969 // Use ptrtoint as appropriate so that we can do our extension. 1970 if (isa<llvm::PointerType>(Arg->getType())) 1971 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy); 1972 llvm::Type *OutputTy = ConvertType(OutputType); 1973 if (isa<llvm::IntegerType>(OutputTy)) 1974 Arg = Builder.CreateZExt(Arg, OutputTy); 1975 else if (isa<llvm::PointerType>(OutputTy)) 1976 Arg = Builder.CreateZExt(Arg, IntPtrTy); 1977 else { 1978 assert(OutputTy->isFloatingPointTy() && "Unexpected output type"); 1979 Arg = Builder.CreateFPExt(Arg, OutputTy); 1980 } 1981 } 1982 } 1983 if (llvm::Type* AdjTy = 1984 getTargetHooks().adjustInlineAsmType(*this, InputConstraint, 1985 Arg->getType())) 1986 Arg = Builder.CreateBitCast(Arg, AdjTy); 1987 else 1988 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) 1989 << InputExpr->getType() << InputConstraint; 1990 1991 ArgTypes.push_back(Arg->getType()); 1992 Args.push_back(Arg); 1993 Constraints += InputConstraint; 1994 } 1995 1996 // Append the "input" part of inout constraints last. 1997 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { 1998 ArgTypes.push_back(InOutArgTypes[i]); 1999 Args.push_back(InOutArgs[i]); 2000 } 2001 Constraints += InOutConstraints; 2002 2003 // Clobbers 2004 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { 2005 StringRef Clobber = S.getClobber(i); 2006 2007 if (Clobber != "memory" && Clobber != "cc") 2008 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber); 2009 2010 if (!Constraints.empty()) 2011 Constraints += ','; 2012 2013 Constraints += "~{"; 2014 Constraints += Clobber; 2015 Constraints += '}'; 2016 } 2017 2018 // Add machine specific clobbers 2019 std::string MachineClobbers = getTarget().getClobbers(); 2020 if (!MachineClobbers.empty()) { 2021 if (!Constraints.empty()) 2022 Constraints += ','; 2023 Constraints += MachineClobbers; 2024 } 2025 2026 llvm::Type *ResultType; 2027 if (ResultRegTypes.empty()) 2028 ResultType = VoidTy; 2029 else if (ResultRegTypes.size() == 1) 2030 ResultType = ResultRegTypes[0]; 2031 else 2032 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes); 2033 2034 llvm::FunctionType *FTy = 2035 llvm::FunctionType::get(ResultType, ArgTypes, false); 2036 2037 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; 2038 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ? 2039 llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT; 2040 llvm::InlineAsm *IA = 2041 llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect, 2042 /* IsAlignStack */ false, AsmDialect); 2043 llvm::CallInst *Result = Builder.CreateCall(IA, Args); 2044 Result->addAttribute(llvm::AttributeSet::FunctionIndex, 2045 llvm::Attribute::NoUnwind); 2046 2047 // Slap the source location of the inline asm into a !srcloc metadata on the 2048 // call. 2049 if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) { 2050 Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(), 2051 *this)); 2052 } else { 2053 // At least put the line number on MS inline asm blobs. 
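    // (Informal note: the value stored is the raw SourceLocation encoding as
    // a 32-bit constant; when the assembler reports an error it hands this
    // cookie back so the frontend can point the diagnostic at the original
    // source line.)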
2054 auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding()); 2055 Result->setMetadata("srcloc", 2056 llvm::MDNode::get(getLLVMContext(), 2057 llvm::ConstantAsMetadata::get(Loc))); 2058 } 2059 2060 // Extract all of the register value results from the asm. 2061 std::vector<llvm::Value*> RegResults; 2062 if (ResultRegTypes.size() == 1) { 2063 RegResults.push_back(Result); 2064 } else { 2065 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) { 2066 llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult"); 2067 RegResults.push_back(Tmp); 2068 } 2069 } 2070 2071 assert(RegResults.size() == ResultRegTypes.size()); 2072 assert(RegResults.size() == ResultTruncRegTypes.size()); 2073 assert(RegResults.size() == ResultRegDests.size()); 2074 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) { 2075 llvm::Value *Tmp = RegResults[i]; 2076 2077 // If the result type of the LLVM IR asm doesn't match the result type of 2078 // the expression, do the conversion. 2079 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) { 2080 llvm::Type *TruncTy = ResultTruncRegTypes[i]; 2081 2082 // Truncate the integer result to the right size, note that TruncTy can be 2083 // a pointer. 2084 if (TruncTy->isFloatingPointTy()) 2085 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy); 2086 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) { 2087 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy); 2088 Tmp = Builder.CreateTrunc(Tmp, 2089 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize)); 2090 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy); 2091 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) { 2092 uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType()); 2093 Tmp = Builder.CreatePtrToInt(Tmp, 2094 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize)); 2095 Tmp = Builder.CreateTrunc(Tmp, TruncTy); 2096 } else if (TruncTy->isIntegerTy()) { 2097 Tmp = Builder.CreateTrunc(Tmp, TruncTy); 2098 } else if (TruncTy->isVectorTy()) { 2099 Tmp = Builder.CreateBitCast(Tmp, TruncTy); 2100 } 2101 } 2102 2103 EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]); 2104 } 2105 } 2106 2107 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) { 2108 const RecordDecl *RD = S.getCapturedRecordDecl(); 2109 QualType RecordTy = getContext().getRecordType(RD); 2110 2111 // Initialize the captured struct. 2112 LValue SlotLV = MakeNaturalAlignAddrLValue( 2113 CreateMemTemp(RecordTy, "agg.captured"), RecordTy); 2114 2115 RecordDecl::field_iterator CurField = RD->field_begin(); 2116 for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(), 2117 E = S.capture_init_end(); 2118 I != E; ++I, ++CurField) { 2119 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField); 2120 if (CurField->hasCapturedVLAType()) { 2121 auto VAT = CurField->getCapturedVLAType(); 2122 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV); 2123 } else { 2124 EmitInitializerForField(*CurField, LV, *I, None); 2125 } 2126 } 2127 2128 return SlotLV; 2129 } 2130 2131 /// Generate an outlined function for the body of a CapturedStmt, store any 2132 /// captured variables into the captured struct, and call the outlined function. 
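///
/// Rough shape of the code emitted at the call site (illustrative only):
///
///   %agg.captured = alloca %struct.anon          ; one field per capture
///   ...captured values / VLA sizes stored into %agg.captured...
///   call void @__captured_stmt(%struct.anon* %agg.captured)
///
/// where the callee name comes from CapturedStmtInfo->getHelperName() and the
/// body is built by GenerateCapturedStmtFunction below.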
2133 llvm::Function * 2134 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) { 2135 LValue CapStruct = InitCapturedStruct(S); 2136 2137 // Emit the CapturedDecl 2138 CodeGenFunction CGF(CGM, true); 2139 CGF.CapturedStmtInfo = new CGCapturedStmtInfo(S, K); 2140 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S); 2141 delete CGF.CapturedStmtInfo; 2142 2143 // Emit call to the helper function. 2144 EmitCallOrInvoke(F, CapStruct.getAddress()); 2145 2146 return F; 2147 } 2148 2149 llvm::Value * 2150 CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) { 2151 LValue CapStruct = InitCapturedStruct(S); 2152 return CapStruct.getAddress(); 2153 } 2154 2155 /// Creates the outlined function for a CapturedStmt. 2156 llvm::Function * 2157 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) { 2158 assert(CapturedStmtInfo && 2159 "CapturedStmtInfo should be set when generating the captured function"); 2160 const CapturedDecl *CD = S.getCapturedDecl(); 2161 const RecordDecl *RD = S.getCapturedRecordDecl(); 2162 SourceLocation Loc = S.getLocStart(); 2163 assert(CD->hasBody() && "missing CapturedDecl body"); 2164 2165 // Build the argument list. 2166 ASTContext &Ctx = CGM.getContext(); 2167 FunctionArgList Args; 2168 Args.append(CD->param_begin(), CD->param_end()); 2169 2170 // Create the function declaration. 2171 FunctionType::ExtInfo ExtInfo; 2172 const CGFunctionInfo &FuncInfo = 2173 CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo, 2174 /*IsVariadic=*/false); 2175 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo); 2176 2177 llvm::Function *F = 2178 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage, 2179 CapturedStmtInfo->getHelperName(), &CGM.getModule()); 2180 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo); 2181 if (CD->isNothrow()) 2182 F->addFnAttr(llvm::Attribute::NoUnwind); 2183 2184 // Generate the function. 2185 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, 2186 CD->getLocation(), 2187 CD->getBody()->getLocStart()); 2188 // Set the context parameter in CapturedStmtInfo. 2189 llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()]; 2190 assert(DeclPtr && "missing context parameter for CapturedStmt"); 2191 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr)); 2192 2193 // Initialize variable-length arrays. 2194 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(), 2195 Ctx.getTagDeclType(RD)); 2196 for (auto *FD : RD->fields()) { 2197 if (FD->hasCapturedVLAType()) { 2198 auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD), 2199 S.getLocStart()).getScalarVal(); 2200 auto VAT = FD->getCapturedVLAType(); 2201 VLASizeMap[VAT->getSizeExpr()] = ExprArg; 2202 } 2203 } 2204 2205 // If 'this' is captured, load it into CXXThisValue. 2206 if (CapturedStmtInfo->isCXXThisExprCaptured()) { 2207 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl(); 2208 LValue ThisLValue = EmitLValueForField(Base, FD); 2209 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal(); 2210 } 2211 2212 PGO.assignRegionCounters(CD, F); 2213 CapturedStmtInfo->EmitBody(*this, CD->getBody()); 2214 FinishFunction(CD->getBodyRBrace()); 2215 2216 return F; 2217 } 2218
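// (Informal sketch of the helper produced above: it is an internal function
// taking the captured struct as its context parameter, roughly
//
//   define internal void @__captured_stmt(%struct.anon* %__context) { ... }
//
// whose prologue reloads the captured fields (ordinary captures, VLA size
// expressions, and 'this' if present) before emitting the statement body.)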