//===--- CGStmt.cpp - Emit LLVM Code from Statements ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
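  // Under -fopenmp-simd only the simd-related portion of a directive is
  // honored; the rest of the construct is lowered as ordinary sequential code.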
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
  case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
  case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
  case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(), S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
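  // The LexicalScope below opens both a cleanup scope and, when debug info is
  // enabled, a matching debug lexical block; both are closed when Scope is
  // destroyed at the end of this function.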
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
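  // (That way they can be rescoped again when the parent scope is itself
  // popped.)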
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
                       getProfileCount(S.getThen()));

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the condition branch, the loop header is typically
  // just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the condition branch, the do.cond block is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block. Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
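  // The loop variable is re-initialized from the iterator on every iteration,
  // which is why it is emitted inside the body's own cleanup scope below.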
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope. This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
  RunCleanupsScope cleanupScope(*this);
  if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
    enterFullExpression(fe);
    RV = fe->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  if (getLangOpts().ElideConstructors &&
      S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big then
/// add multiple cases to the switch instruction, one for each value within
/// the range. If the range is too big then emit an "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  }
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
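  // If there was no insertion point before the range check (the preceding code
  // was unreachable), leave the insertion point cleared.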
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, &S);
    }

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided.
  // This situation only happens when we've constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit. Basically,
/// on a switch (5) we want to find these statements:
///   case 5:
///     printf(...);    <--
///     ++i;            <--
///     break;
///
/// and add them to the ResultStmts vector. If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough. If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels. If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                            SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business. Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a switch statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    bool StartedInLiveCode = FoundCase;
    unsigned StartSize = ResultStmts.size();

    // If we've not found the case yet, scan through looking for it.
    if (Case) {
      // Keep track of whether we see a skipped declaration. The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means either 1) the statement doesn't have
          // the case and is skippable, or 2) it does contain the case value
          // and also contains the break to exit the switch. In the latter
          // case, we just verify the rest of the statements are elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements. Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }

      if (!FoundCase)
        return CSFC_Success;

      assert(!HadSkippedDecl && "fallthrough after skipping decl");
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    bool AnyDecls = false;
    for (; I != E; ++I) {
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);

      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion. We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // If we're about to fall out of a scope without hitting a 'break;', we
    // can't perform the optimization if there were any decls in that scope
    // (we'd lose their end-of-lifetime).
    if (AnyDecls) {
      // If the entire compound statement was live, there's one more thing we
      // can try before giving up: emit the whole thing as a single statement.
      // We can do that unless the statement contains a 'break;'.
      // FIXME: Such a break must be at the end of a construct within this one.
      // We could emit this by just ignoring the BreakStmts entirely.
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
        ResultStmts.resize(StartSize);
        ResultStmts.push_back(S);
      } else {
        return CSFC_Failure;
      }
    }

    return CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc. If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement. Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great. Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                       SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to. We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case. Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc. If it is safe, return successfully with an empty ResultStmts list.
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it. This can fail for a variety of reasons. Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // Handle nested switch statements.
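  // Save the enclosing switch's emission state so it can be restored once this
  // switch has been emitted.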
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      if (S.getInit())
        EmitStmt(S.getInit());

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
      if (S.getConditionVariable())
        EmitDecl(*S.getConditionVariable());

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case.  Emit
      // the specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);
      incrementProfileCounter(&S);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement.  We also need to create a default block now so that
  // explicit case range tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  if (PGO.haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first.  We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  }
  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to SwitchExit.  If BreakContinueStack is
  // non-empty, reuse the last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);
  incrementProfileCounter(&S);

  // If the switch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the switch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  auto *Call = dyn_cast<CallExpr>(S.getCond());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
                              MDHelper.createUnpredictable());
    }
  }

  if (SwitchWeights) {
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
           "switch weights do not match switch cases");
    // If there's only one jump destination there's no sense weighting it.
    if (SwitchWeights->size() > 1)
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*SwitchWeights));
    delete SwitchWeights;
  }
  SwitchInsn = SavedSwitchInsn;
  SwitchWeights = SavedSwitchWeights;
  CaseRangeBlock = SavedCRBlock;
}

static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                   SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these.
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in multi-alternative constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case '&':
    case '%':
      Result += *Constraint;
      while (Constraint[1] && Constraint[1] == *Constraint)
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}

/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
/// as using a particular register, add that register as a constraint to be
/// used in this asm stmt.
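///
/// Illustrative example (not from the original source): given
///   register int Foo asm("eax");
///   asm("..." : "=r"(Foo));
/// the simplified constraint "r" is replaced by "{eax}" (or "&{eax}" when the
/// output is early-clobber); the caller then prefixes it with '=' to form the
/// final output constraint.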
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt, const bool EarlyClobber) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) &&
      !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
  Register = Target.getNormalizedGCCRegisterName(Register);
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}

llvm::Value *
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
                                    LValue InputValue, QualType InputType,
                                    std::string &ConstraintStr,
                                    SourceLocation Loc) {
  llvm::Value *Arg;
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
      Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
    } else {
      llvm::Type *Ty = ConvertType(InputType);
      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
        Ty = llvm::PointerType::getUnqual(Ty);

        Arg = Builder.CreateLoad(
            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
      } else {
        Arg = InputValue.getPointer(*this);
        ConstraintStr += '*';
      }
    }
  } else {
    Arg = InputValue.getPointer(*this);
    ConstraintStr += '*';
  }

  return Arg;
}

llvm::Value *CodeGenFunction::EmitAsmInput(
    const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
    std::string &ConstraintStr) {
  // If this can't be a register or memory, i.e., has to be a constant
  // (immediate or symbolic), try to emit it as such.
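  // Illustrative example (not from the original source): with an immediate
  // constraint such as
  //   asm volatile("" :: "n"(42));
  // the operand may not be a register or memory location, so the evaluation
  // below tries to fold it to an llvm::ConstantInt instead of emitting a
  // loaded value.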
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
    if (Info.requiresImmediateConstant()) {
      Expr::EvalResult EVResult;
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);

      llvm::APSInt IntResult;
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
                                          getContext()))
        return llvm::ConstantInt::get(getLLVMContext(), IntResult);
    }

    Expr::EvalResult Result;
    if (InputExpr->EvaluateAsInt(Result, getContext()))
      return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
  }

  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return EmitScalarExpr(InputExpr);
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
    return EmitScalarExpr(InputExpr);
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}

/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
/// asm call instruction.  The !srcloc MDNode contains a list of constant
/// integers which are the source locations of the start of each line in the
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
                                      CodeGenFunction &CGF) {
  SmallVector<llvm::Metadata *, 8> Locs;
  // Add the location of the first line to the MDNode.
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
      CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
  StringRef StrVal = Str->getString();
  if (!StrVal.empty()) {
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
    unsigned StartToken = 0;
    unsigned ByteOffset = 0;

    // Add the location of the start of each subsequent line of the asm to the
    // MDNode.
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
      if (StrVal[i] != '\n') continue;
      SourceLocation LineLoc = Str->getLocationOfByte(
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
      Locs.push_back(llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
    }
  }

  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}

static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
                              bool ReadOnly, bool ReadNone, const AsmStmt &S,
                              const std::vector<llvm::Type *> &ResultRegTypes,
                              CodeGenFunction &CGF,
                              std::vector<llvm::Value *> &RegResults) {
  Result.addAttribute(llvm::AttributeList::FunctionIndex,
                      llvm::Attribute::NoUnwind);
  // Attach readnone and readonly attributes.
  if (!HasSideEffect) {
    if (ReadNone)
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::ReadNone);
    else if (ReadOnly)
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::ReadOnly);
  }

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result.setMetadata("srcloc",
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
  else {
    // At least put the line number on MS inline asm blobs.
    llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
                                                 S.getAsmLoc().getRawEncoding());
    Result.setMetadata("srcloc",
                       llvm::MDNode::get(CGF.getLLVMContext(),
                                         llvm::ConstantAsMetadata::get(Loc)));
  }

  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as bar.sync, and so can't have certain optimizations applied around
    // them).
    Result.addAttribute(llvm::AttributeList::FunctionIndex,
                        llvm::Attribute::Convergent);
  // Extract all of the register value results from the asm.
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(&Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }
}

void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
  // Assemble the final asm string.
  std::string AsmString = S.generateAsmString(getContext());

  // Get all the output and input constraints together.
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getOutputName(i);
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
    assert(IsValid && "Failed to parse output constraint");
    OutputConstraintInfos.push_back(Info);
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getInputName(i);
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
    bool IsValid =
        getTarget().validateInputConstraint(OutputConstraintInfos, Info);
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
    InputConstraintInfos.push_back(Info);
  }

  std::string Constraints;

  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Value *> Args;
  llvm::BitVector ResultTypeRequiresCast;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value *> InOutArgs;
  std::vector<llvm::Type *> InOutArgTypes;

  // Keep track of output constraints for tied input operands.
  std::vector<std::string> OutputConstraints;

  // An inline asm can be marked readonly if it meets the following conditions:
  //  - it doesn't have any side effects
  //  - it doesn't clobber memory
  //  - it doesn't return a value by-reference
  // It can be marked readnone if it doesn't have any input memory constraints
  // in addition to meeting the conditions listed above.
  bool ReadOnly = true, ReadNone = true;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
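    // Illustrative example (not from the original source): an output
    // constraint written as "=&r" still carries its leading '=' here; the
    // call below skips that first character, and SimplifyConstraint keeps the
    // '&', yielding "&r".  A 'g' constraint would likewise expand to "imr".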
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget(), &OutputConstraintInfos);

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S,
                                              Info.earlyClobber());
    OutputConstraints.push_back(OutputConstraint);
    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value.  If this is a memory result, return the value by-reference.
    bool isScalarizableAggregate =
        hasAggregateEvaluationKind(OutExpr->getType());
    if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
                                 isScalarizableAggregate)) {
      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(OutExpr->getType());
      ResultRegDests.push_back(Dest);
      ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
      if (Info.allowsRegister() && isScalarizableAggregate) {
        ResultTypeRequiresCast.push_back(true);
        unsigned Size = getContext().getTypeSize(OutExpr->getType());
        llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
        ResultRegTypes.push_back(ConvTy);
      } else {
        ResultTypeRequiresCast.push_back(false);
        ResultRegTypes.push_back(ResultTruncRegTypes.back());
      }
      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      if (llvm::Type *AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getFixedSize());
    } else {
      ArgTypes.push_back(Dest.getAddress(*this).getType());
      Args.push_back(Dest.getPointer(*this));
      Constraints += "=*";
      Constraints += OutputConstraint;
      ReadOnly = ReadNone = false;
    }

    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
                                            InOutConstraints,
                                            InputExpr->getExprLoc());

      if (llvm::Type *AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getFixedSize());
      if (Info.allowsRegister())
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgs.push_back(Arg);
    }
  }

  // If this is a Microsoft-style asm blob, store the return registers
  // (EAX:EDX) to the return value slot.  Only do this when returning in
  // registers.
  if (isa<MSAsmStmt>(&S)) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    if (RetAI.isDirect() || RetAI.isExtend()) {
      // Make a fake lvalue for the return value slot.
      LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
          ResultRegDests, AsmString, S.getNumOutputs());
      SawAsmBlock = true;
    }
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (Info.allowsMemory())
      ReadNone = false;

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    std::string InputConstraint(S.getInputConstraint(i));
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint = AddVariableConstraints(
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
        getTarget(), CGM, S, false /* No EarlyClobber */);

    std::string ReplaceConstraint(InputConstraint);
    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output.  The LLVM backend wants to see
    // the input and output of a matching constraint be the same size.  Note
    // that GCC does not define what the top bits are here.  We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else {
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
          Arg = Builder.CreateFPExt(Arg, OutputTy);
        }
      }
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
      ReplaceConstraint = OutputConstraints[Output];
    }
    if (llvm::Type *AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
                                                 Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
          << InputExpr->getType() << InputConstraint;

    // Update largest vector width for any vector types.
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getFixedSize());

    ArgTypes.push_back(Arg->getType());
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }

  // Labels
  SmallVector<llvm::BasicBlock *, 16> Transfer;
  llvm::BasicBlock *Fallthrough = nullptr;
  bool IsGCCAsmGoto = false;
  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
    IsGCCAsmGoto = GS->isAsmGoto();
    if (IsGCCAsmGoto) {
      for (const auto *E : GS->labels()) {
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
        Transfer.push_back(Dest.getBlock());
        llvm::BlockAddress *BA =
            llvm::BlockAddress::get(CurFn, Dest.getBlock());
        Args.push_back(BA);
        ArgTypes.push_back(BA->getType());
        if (!Constraints.empty())
          Constraints += ',';
        Constraints += 'X';
      }
      Fallthrough = createBasicBlock("asm.fallthrough");
    }
  }

  // Append the "input" part of inout constraints last.
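  // Illustrative example (not from the original source): for an operand
  // written as "+r"(X), the output side was emitted earlier as "=r" and the
  // matching input recorded in InOutConstraints refers back to it by operand
  // number, so the final constraint string ends up looking like "=r,...,0".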
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    if (Clobber == "memory")
      ReadOnly = ReadNone = false;
    else if (Clobber != "cc") {
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
      if (CGM.getCodeGenOpts().StackClashProtector &&
          getTarget().isSPRegName(Clobber)) {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::warn_stack_clash_protection_inline_asm);
      }
    }

    if (!Constraints.empty())
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  // Add machine-specific clobbers.
  std::string MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ResultType, ArgTypes, false);

  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
      llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
                           /* IsAlignStack */ false, AsmDialect);
  std::vector<llvm::Value *> RegResults;
  if (IsGCCAsmGoto) {
    llvm::CallBrInst *Result =
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
    EmitBlock(Fallthrough);
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
                      ReadNone, S, ResultRegTypes, *this, RegResults);
  } else {
    llvm::CallInst *Result =
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
                      ReadNone, S, ResultRegTypes, *this, RegResults);
  }

  assert(RegResults.size() == ResultRegTypes.size());
  assert(RegResults.size() == ResultTruncRegTypes.size());
  assert(RegResults.size() == ResultRegDests.size());
  // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
  // in which case its size may grow.
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
      llvm::Type *TruncTy = ResultTruncRegTypes[i];

      // Truncate the integer result to the right size; note that TruncTy can
      // be a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(Tmp,
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(Tmp,
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
      } else if (TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    LValue Dest = ResultRegDests[i];
    // ResultTypeRequiresCast elements correspond to the first
    // ResultTypeRequiresCast.size() elements of RegResults.
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
                                        ResultRegTypes[i]->getPointerTo());
      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
      if (Ty.isNull()) {
        const Expr *OutExpr = S.getOutputExpr(i);
        CGM.Error(
            OutExpr->getExprLoc(),
            "impossible constraint in asm: can't store value into a register");
        return;
      }
      Dest = MakeAddrLValue(A, Ty);
    }
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
  }
}

LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV =
      MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                  E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      auto VAT = CurField->getCapturedVLAType();
      EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *I);
    }
  }

  return SlotLV;
}

/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined
/// function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  LValue CapStruct = InitCapturedStruct(S);

  // Emit the CapturedDecl.
  CodeGenFunction CGF(CGM, true);
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));

  return F;
}

Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
  LValue CapStruct = InitCapturedStruct(S);
  return CapStruct.getAddress(*this);
}

/// Creates the outlined function for a CapturedStmt.
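///
/// Illustrative sketch (not from the original source): the helper has
/// internal linkage, returns void, is named by
/// CapturedStmtInfo->getHelperName() (e.g. "__captured_stmt" for a plain
/// captured region), and receives the record built by InitCapturedStruct
/// through the CapturedDecl's context parameter; captured values, VLA sizes,
/// and a captured 'this' are read back out of that record below.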
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays.
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}