//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, as they may be
      // in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
  case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));                         break;
  case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
  case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
  case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  for (const auto *A : S.getAttrs())
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
      break;
    }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the branch, the LoopHeader is typically just a
  // forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the do..while condition block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the branch, the DoCond block is typically just a
  // forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
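  // Illustrative sketch (not a literal transcript of the emitted IR): for a
  // loop with both a condition and an increment, the blocks created below are
  // wired up as
  //   for.cond -> for.body (condition true)
  //            -> for.end  (condition false; via for.cond.cleanup when
  //                         cleanups must run)
  //   for.body -> for.inc -> for.cond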
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that block will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
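///
/// An illustrative sketch in C (both forms are diagnosed with a warning but
/// accepted, not rejected):
///   void g(void);
///   void f(void) { return g(); }  // operand although f returns void
///   int  h(void) { return; }      // no operand although h returns non-void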
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized).
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
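    // For example (illustrative only): for 'int &f() { static int g; return g; }'
    // the address of g, not its value, is what gets stored into the return slot.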
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  }
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive.  It also causes
  // deep recursion which can run into stack depth limitations.  Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, &S);
    }

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///   case 5:
///     printf(...);    <--
///     ++i;            <--
///     break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                            SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business.  Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a compound statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    bool StartedInLiveCode = FoundCase;
    unsigned StartSize = ResultStmts.size();

    // If we've not found the case yet, scan through looking for it.
    if (Case) {
      // Keep track of whether we see a skipped declaration.  The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means either 1) that the statement doesn't
          // have the case and is skippable, or 2) that it does contain the
          // case value and also contains the break to exit the switch.  In the
          // latter case, we just verify the rest of the statements are
          // elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements.  Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }

      if (!FoundCase)
        return CSFC_Success;

      assert(!HadSkippedDecl && "fallthrough after skipping decl");
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    bool AnyDecls = false;
    for (; I != E; ++I) {
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);

      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion.  We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // If we're about to fall out of a scope without hitting a 'break;', we
    // can't perform the optimization if there were any decls in that scope
    // (we'd lose their end-of-lifetime).
    if (AnyDecls) {
      // If the entire compound statement was live, there's one more thing we
      // can try before giving up: emit the whole thing as a single statement.
      // We can do that unless the statement contains a 'break;'.
      // FIXME: Such a break must be at the end of a construct within this one.
      // We could emit this by just ignoring the BreakStmts entirely.
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
        ResultStmts.resize(StartSize);
        ResultStmts.push_back(S);
      } else {
        return CSFC_Failure;
      }
    }

    return CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc.  If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement.  Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great.  Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant.  See the comment above CollectStatementsForCase
/// for more details.
1583 static bool FindCaseStatementsForValue(const SwitchStmt &S, 1584 const llvm::APSInt &ConstantCondValue, 1585 SmallVectorImpl<const Stmt*> &ResultStmts, 1586 ASTContext &C, 1587 const SwitchCase *&ResultCase) { 1588 // First step, find the switch case that is being branched to. We can do this 1589 // efficiently by scanning the SwitchCase list. 1590 const SwitchCase *Case = S.getSwitchCaseList(); 1591 const DefaultStmt *DefaultCase = nullptr; 1592 1593 for (; Case; Case = Case->getNextSwitchCase()) { 1594 // It's either a default or case. Just remember the default statement in 1595 // case we're not jumping to any numbered cases. 1596 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) { 1597 DefaultCase = DS; 1598 continue; 1599 } 1600 1601 // Check to see if this case is the one we're looking for. 1602 const CaseStmt *CS = cast<CaseStmt>(Case); 1603 // Don't handle case ranges yet. 1604 if (CS->getRHS()) return false; 1605 1606 // If we found our case, remember it as 'case'. 1607 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue) 1608 break; 1609 } 1610 1611 // If we didn't find a matching case, we use a default if it exists, or we 1612 // elide the whole switch body! 1613 if (!Case) { 1614 // It is safe to elide the body of the switch if it doesn't contain labels 1615 // etc. If it is safe, return successfully with an empty ResultStmts list. 1616 if (!DefaultCase) 1617 return !CodeGenFunction::ContainsLabel(&S); 1618 Case = DefaultCase; 1619 } 1620 1621 // Ok, we know which case is being jumped to, try to collect all the 1622 // statements that follow it. This can fail for a variety of reasons. Also, 1623 // check to see that the recursive walk actually found our case statement. 1624 // Insane cases like this can fail to find it in the recursive walk since we 1625 // don't handle every stmt kind: 1626 // switch (4) { 1627 // while (1) { 1628 // case 4: ... 1629 bool FoundCase = false; 1630 ResultCase = Case; 1631 return CollectStatementsForCase(S.getBody(), Case, FoundCase, 1632 ResultStmts) != CSFC_Failure && 1633 FoundCase; 1634 } 1635 1636 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { 1637 // Handle nested switch statements. 1638 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn; 1639 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights; 1640 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock; 1641 1642 // See if we can constant fold the condition of the switch and therefore only 1643 // emit the live case statement (if any) of the switch. 1644 llvm::APSInt ConstantCondValue; 1645 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) { 1646 SmallVector<const Stmt*, 4> CaseStmts; 1647 const SwitchCase *Case = nullptr; 1648 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts, 1649 getContext(), Case)) { 1650 if (Case) 1651 incrementProfileCounter(Case); 1652 RunCleanupsScope ExecutedScope(*this); 1653 1654 if (S.getInit()) 1655 EmitStmt(S.getInit()); 1656 1657 // Emit the condition variable if needed inside the entire cleanup scope 1658 // used by this special case for constant folded switches. 1659 if (S.getConditionVariable()) 1660 EmitDecl(*S.getConditionVariable()); 1661 1662 // At this point, we are no longer "within" a switch instance, so 1663 // we can temporarily enforce this to ensure that any embedded case 1664 // statements are not emitted. 1665 SwitchInsn = nullptr; 1666 1667 // Okay, we can dead code eliminate everything except this case. Emit the 1668 // specified series of statements and we're good. 
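      // For example, for 'switch (1) { case 1: a(); b(); break; }'
      // (illustrative) CaseStmts holds the calls to a() and b().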
1669 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i) 1670 EmitStmt(CaseStmts[i]); 1671 incrementProfileCounter(&S); 1672 1673 // Now we want to restore the saved switch instance so that nested 1674 // switches continue to function properly 1675 SwitchInsn = SavedSwitchInsn; 1676 1677 return; 1678 } 1679 } 1680 1681 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog"); 1682 1683 RunCleanupsScope ConditionScope(*this); 1684 1685 if (S.getInit()) 1686 EmitStmt(S.getInit()); 1687 1688 if (S.getConditionVariable()) 1689 EmitDecl(*S.getConditionVariable()); 1690 llvm::Value *CondV = EmitScalarExpr(S.getCond()); 1691 1692 // Create basic block to hold stuff that comes after switch 1693 // statement. We also need to create a default block now so that 1694 // explicit case ranges tests can have a place to jump to on 1695 // failure. 1696 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default"); 1697 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock); 1698 if (PGO.haveRegionCounts()) { 1699 // Walk the SwitchCase list to find how many there are. 1700 uint64_t DefaultCount = 0; 1701 unsigned NumCases = 0; 1702 for (const SwitchCase *Case = S.getSwitchCaseList(); 1703 Case; 1704 Case = Case->getNextSwitchCase()) { 1705 if (isa<DefaultStmt>(Case)) 1706 DefaultCount = getProfileCount(Case); 1707 NumCases += 1; 1708 } 1709 SwitchWeights = new SmallVector<uint64_t, 16>(); 1710 SwitchWeights->reserve(NumCases); 1711 // The default needs to be first. We store the edge count, so we already 1712 // know the right weight. 1713 SwitchWeights->push_back(DefaultCount); 1714 } 1715 CaseRangeBlock = DefaultBlock; 1716 1717 // Clear the insertion point to indicate we are in unreachable code. 1718 Builder.ClearInsertionPoint(); 1719 1720 // All break statements jump to NextBlock. If BreakContinueStack is non-empty 1721 // then reuse last ContinueBlock. 1722 JumpDest OuterContinue; 1723 if (!BreakContinueStack.empty()) 1724 OuterContinue = BreakContinueStack.back().ContinueBlock; 1725 1726 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue)); 1727 1728 // Emit switch body. 1729 EmitStmt(S.getBody()); 1730 1731 BreakContinueStack.pop_back(); 1732 1733 // Update the default block in case explicit case range tests have 1734 // been chained on top. 1735 SwitchInsn->setDefaultDest(CaseRangeBlock); 1736 1737 // If a default was never emitted: 1738 if (!DefaultBlock->getParent()) { 1739 // If we have cleanups, emit the default block so that there's a 1740 // place to jump through the cleanups from. 1741 if (ConditionScope.requiresCleanups()) { 1742 EmitBlock(DefaultBlock); 1743 1744 // Otherwise, just forward the default block to the switch end. 1745 } else { 1746 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock()); 1747 delete DefaultBlock; 1748 } 1749 } 1750 1751 ConditionScope.ForceCleanup(); 1752 1753 // Emit continuation. 1754 EmitBlock(SwitchExit.getBlock(), true); 1755 incrementProfileCounter(&S); 1756 1757 // If the switch has a condition wrapped by __builtin_unpredictable, 1758 // create metadata that specifies that the switch is unpredictable. 1759 // Don't bother if not optimizing because that metadata would not be used. 
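  // For example (illustrative):
  //   switch (__builtin_unpredictable(x)) { ... }
  // results in !unpredictable metadata on the emitted 'switch' instruction.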
1760 auto *Call = dyn_cast<CallExpr>(S.getCond()); 1761 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { 1762 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl()); 1763 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) { 1764 llvm::MDBuilder MDHelper(getLLVMContext()); 1765 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable, 1766 MDHelper.createUnpredictable()); 1767 } 1768 } 1769 1770 if (SwitchWeights) { 1771 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() && 1772 "switch weights do not match switch cases"); 1773 // If there's only one jump destination there's no sense weighting it. 1774 if (SwitchWeights->size() > 1) 1775 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof, 1776 createProfileWeights(*SwitchWeights)); 1777 delete SwitchWeights; 1778 } 1779 SwitchInsn = SavedSwitchInsn; 1780 SwitchWeights = SavedSwitchWeights; 1781 CaseRangeBlock = SavedCRBlock; 1782 } 1783 1784 static std::string 1785 SimplifyConstraint(const char *Constraint, const TargetInfo &Target, 1786 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) { 1787 std::string Result; 1788 1789 while (*Constraint) { 1790 switch (*Constraint) { 1791 default: 1792 Result += Target.convertConstraint(Constraint); 1793 break; 1794 // Ignore these 1795 case '*': 1796 case '?': 1797 case '!': 1798 case '=': // Will see this and the following in mult-alt constraints. 1799 case '+': 1800 break; 1801 case '#': // Ignore the rest of the constraint alternative. 1802 while (Constraint[1] && Constraint[1] != ',') 1803 Constraint++; 1804 break; 1805 case '&': 1806 case '%': 1807 Result += *Constraint; 1808 while (Constraint[1] && Constraint[1] == *Constraint) 1809 Constraint++; 1810 break; 1811 case ',': 1812 Result += "|"; 1813 break; 1814 case 'g': 1815 Result += "imr"; 1816 break; 1817 case '[': { 1818 assert(OutCons && 1819 "Must pass output names to constraints with a symbolic name"); 1820 unsigned Index; 1821 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index); 1822 assert(result && "Could not resolve symbolic name"); (void)result; 1823 Result += llvm::utostr(Index); 1824 break; 1825 } 1826 } 1827 1828 Constraint++; 1829 } 1830 1831 return Result; 1832 } 1833 1834 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared 1835 /// as using a particular register add that as a constraint that will be used 1836 /// in this asm stmt. 1837 static std::string 1838 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, 1839 const TargetInfo &Target, CodeGenModule &CGM, 1840 const AsmStmt &Stmt, const bool EarlyClobber, 1841 std::string *GCCReg = nullptr) { 1842 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr); 1843 if (!AsmDeclRef) 1844 return Constraint; 1845 const ValueDecl &Value = *AsmDeclRef->getDecl(); 1846 const VarDecl *Variable = dyn_cast<VarDecl>(&Value); 1847 if (!Variable) 1848 return Constraint; 1849 if (Variable->getStorageClass() != SC_Register) 1850 return Constraint; 1851 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>(); 1852 if (!Attr) 1853 return Constraint; 1854 StringRef Register = Attr->getLabel(); 1855 assert(Target.isValidGCCRegisterName(Register)); 1856 // We're using validateOutputConstraint here because we only care if 1857 // this is a register constraint. 
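  // For example (illustrative), with
  //   register int val asm("eax");
  //   asm("..." : "=r"(val));
  // the simplified "r" constraint is replaced by the explicit register
  // reference "{eax}".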
1858 TargetInfo::ConstraintInfo Info(Constraint, ""); 1859 if (Target.validateOutputConstraint(Info) && 1860 !Info.allowsRegister()) { 1861 CGM.ErrorUnsupported(&Stmt, "__asm__"); 1862 return Constraint; 1863 } 1864 // Canonicalize the register here before returning it. 1865 Register = Target.getNormalizedGCCRegisterName(Register); 1866 if (GCCReg != nullptr) 1867 *GCCReg = Register.str(); 1868 return (EarlyClobber ? "&{" : "{") + Register.str() + "}"; 1869 } 1870 1871 llvm::Value* 1872 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, 1873 LValue InputValue, QualType InputType, 1874 std::string &ConstraintStr, 1875 SourceLocation Loc) { 1876 llvm::Value *Arg; 1877 if (Info.allowsRegister() || !Info.allowsMemory()) { 1878 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) { 1879 Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal(); 1880 } else { 1881 llvm::Type *Ty = ConvertType(InputType); 1882 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); 1883 if (Size <= 64 && llvm::isPowerOf2_64(Size)) { 1884 Ty = llvm::IntegerType::get(getLLVMContext(), Size); 1885 Ty = llvm::PointerType::getUnqual(Ty); 1886 1887 Arg = Builder.CreateLoad( 1888 Builder.CreateBitCast(InputValue.getAddress(*this), Ty)); 1889 } else { 1890 Arg = InputValue.getPointer(*this); 1891 ConstraintStr += '*'; 1892 } 1893 } 1894 } else { 1895 Arg = InputValue.getPointer(*this); 1896 ConstraintStr += '*'; 1897 } 1898 1899 return Arg; 1900 } 1901 1902 llvm::Value* CodeGenFunction::EmitAsmInput( 1903 const TargetInfo::ConstraintInfo &Info, 1904 const Expr *InputExpr, 1905 std::string &ConstraintStr) { 1906 // If this can't be a register or memory, i.e., has to be a constant 1907 // (immediate or symbolic), try to emit it as such. 1908 if (!Info.allowsRegister() && !Info.allowsMemory()) { 1909 if (Info.requiresImmediateConstant()) { 1910 Expr::EvalResult EVResult; 1911 InputExpr->EvaluateAsRValue(EVResult, getContext(), true); 1912 1913 llvm::APSInt IntResult; 1914 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(), 1915 getContext())) 1916 return llvm::ConstantInt::get(getLLVMContext(), IntResult); 1917 } 1918 1919 Expr::EvalResult Result; 1920 if (InputExpr->EvaluateAsInt(Result, getContext())) 1921 return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()); 1922 } 1923 1924 if (Info.allowsRegister() || !Info.allowsMemory()) 1925 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType())) 1926 return EmitScalarExpr(InputExpr); 1927 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass) 1928 return EmitScalarExpr(InputExpr); 1929 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); 1930 LValue Dest = EmitLValue(InputExpr); 1931 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, 1932 InputExpr->getExprLoc()); 1933 } 1934 1935 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline 1936 /// asm call instruction. The !srcloc MDNode contains a list of constant 1937 /// integers which are the source locations of the start of each line in the 1938 /// asm. 1939 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str, 1940 CodeGenFunction &CGF) { 1941 SmallVector<llvm::Metadata *, 8> Locs; 1942 // Add the location of the first line to the MDNode. 
1943 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( 1944 CGF.Int32Ty, Str->getBeginLoc().getRawEncoding()))); 1945 StringRef StrVal = Str->getString(); 1946 if (!StrVal.empty()) { 1947 const SourceManager &SM = CGF.CGM.getContext().getSourceManager(); 1948 const LangOptions &LangOpts = CGF.CGM.getLangOpts(); 1949 unsigned StartToken = 0; 1950 unsigned ByteOffset = 0; 1951 1952 // Add the location of the start of each subsequent line of the asm to the 1953 // MDNode. 1954 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) { 1955 if (StrVal[i] != '\n') continue; 1956 SourceLocation LineLoc = Str->getLocationOfByte( 1957 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset); 1958 Locs.push_back(llvm::ConstantAsMetadata::get( 1959 llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding()))); 1960 } 1961 } 1962 1963 return llvm::MDNode::get(CGF.getLLVMContext(), Locs); 1964 } 1965 1966 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, 1967 bool ReadOnly, bool ReadNone, bool NoMerge, 1968 const AsmStmt &S, 1969 const std::vector<llvm::Type *> &ResultRegTypes, 1970 CodeGenFunction &CGF, 1971 std::vector<llvm::Value *> &RegResults) { 1972 Result.addAttribute(llvm::AttributeList::FunctionIndex, 1973 llvm::Attribute::NoUnwind); 1974 if (NoMerge) 1975 Result.addAttribute(llvm::AttributeList::FunctionIndex, 1976 llvm::Attribute::NoMerge); 1977 // Attach readnone and readonly attributes. 1978 if (!HasSideEffect) { 1979 if (ReadNone) 1980 Result.addAttribute(llvm::AttributeList::FunctionIndex, 1981 llvm::Attribute::ReadNone); 1982 else if (ReadOnly) 1983 Result.addAttribute(llvm::AttributeList::FunctionIndex, 1984 llvm::Attribute::ReadOnly); 1985 } 1986 1987 // Slap the source location of the inline asm into a !srcloc metadata on the 1988 // call. 1989 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) 1990 Result.setMetadata("srcloc", 1991 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF)); 1992 else { 1993 // At least put the line number on MS inline asm blobs. 1994 llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty, 1995 S.getAsmLoc().getRawEncoding()); 1996 Result.setMetadata("srcloc", 1997 llvm::MDNode::get(CGF.getLLVMContext(), 1998 llvm::ConstantAsMetadata::get(Loc))); 1999 } 2000 2001 if (CGF.getLangOpts().assumeFunctionsAreConvergent()) 2002 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as 2003 // convergent (meaning, they may call an intrinsically convergent op, such 2004 // as bar.sync, and so can't have certain optimizations applied around 2005 // them). 2006 Result.addAttribute(llvm::AttributeList::FunctionIndex, 2007 llvm::Attribute::Convergent); 2008 // Extract all of the register value results from the asm. 2009 if (ResultRegTypes.size() == 1) { 2010 RegResults.push_back(&Result); 2011 } else { 2012 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) { 2013 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult"); 2014 RegResults.push_back(Tmp); 2015 } 2016 } 2017 } 2018 2019 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { 2020 // Assemble the final asm string. 2021 std::string AsmString = S.generateAsmString(getContext()); 2022 2023 // Get all the output and input constraints together. 
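  // For example (illustrative, x86 AT&T syntax):
  //   asm("addl %2, %0" : "=r"(out) : "0"(a), "r"(b));
  // has one output constraint ("=r") and two input constraints: "0", which is
  // tied to output 0, and "r".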
2024 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos; 2025 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos; 2026 2027 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { 2028 StringRef Name; 2029 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) 2030 Name = GAS->getOutputName(i); 2031 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name); 2032 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid; 2033 assert(IsValid && "Failed to parse output constraint"); 2034 OutputConstraintInfos.push_back(Info); 2035 } 2036 2037 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { 2038 StringRef Name; 2039 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) 2040 Name = GAS->getInputName(i); 2041 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name); 2042 bool IsValid = 2043 getTarget().validateInputConstraint(OutputConstraintInfos, Info); 2044 assert(IsValid && "Failed to parse input constraint"); (void)IsValid; 2045 InputConstraintInfos.push_back(Info); 2046 } 2047 2048 std::string Constraints; 2049 2050 std::vector<LValue> ResultRegDests; 2051 std::vector<QualType> ResultRegQualTys; 2052 std::vector<llvm::Type *> ResultRegTypes; 2053 std::vector<llvm::Type *> ResultTruncRegTypes; 2054 std::vector<llvm::Type *> ArgTypes; 2055 std::vector<llvm::Value*> Args; 2056 llvm::BitVector ResultTypeRequiresCast; 2057 2058 // Keep track of inout constraints. 2059 std::string InOutConstraints; 2060 std::vector<llvm::Value*> InOutArgs; 2061 std::vector<llvm::Type*> InOutArgTypes; 2062 2063 // Keep track of out constraints for tied input operand. 2064 std::vector<std::string> OutputConstraints; 2065 2066 // Keep track of defined physregs. 2067 llvm::SmallSet<std::string, 8> PhysRegOutputs; 2068 2069 // An inline asm can be marked readonly if it meets the following conditions: 2070 // - it doesn't have any sideeffects 2071 // - it doesn't clobber memory 2072 // - it doesn't return a value by-reference 2073 // It can be marked readnone if it doesn't have any input memory constraints 2074 // in addition to meeting the conditions listed above. 2075 bool ReadOnly = true, ReadNone = true; 2076 2077 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { 2078 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; 2079 2080 // Simplify the output constraint. 2081 std::string OutputConstraint(S.getOutputConstraint(i)); 2082 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, 2083 getTarget(), &OutputConstraintInfos); 2084 2085 const Expr *OutExpr = S.getOutputExpr(i); 2086 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); 2087 2088 std::string GCCReg; 2089 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, 2090 getTarget(), CGM, S, 2091 Info.earlyClobber(), 2092 &GCCReg); 2093 // Give an error on multiple outputs to same physreg. 2094 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second) 2095 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg); 2096 2097 OutputConstraints.push_back(OutputConstraint); 2098 LValue Dest = EmitLValue(OutExpr); 2099 if (!Constraints.empty()) 2100 Constraints += ','; 2101 2102 // If this is a register output, then make the inline asm return it 2103 // by-value. If this is a memory result, return the value by-reference. 
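    // For example (illustrative), "=r"(x) makes the asm return the value as an
    // IR result that is stored back to x afterwards, while "=m"(x) passes the
    // address of x as an argument under an indirect "=*m" constraint.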
2104 bool isScalarizableAggregate = 2105 hasAggregateEvaluationKind(OutExpr->getType()); 2106 if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) || 2107 isScalarizableAggregate)) { 2108 Constraints += "=" + OutputConstraint; 2109 ResultRegQualTys.push_back(OutExpr->getType()); 2110 ResultRegDests.push_back(Dest); 2111 ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType())); 2112 if (Info.allowsRegister() && isScalarizableAggregate) { 2113 ResultTypeRequiresCast.push_back(true); 2114 unsigned Size = getContext().getTypeSize(OutExpr->getType()); 2115 llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size); 2116 ResultRegTypes.push_back(ConvTy); 2117 } else { 2118 ResultTypeRequiresCast.push_back(false); 2119 ResultRegTypes.push_back(ResultTruncRegTypes.back()); 2120 } 2121 // If this output is tied to an input, and if the input is larger, then 2122 // we need to set the actual result type of the inline asm node to be the 2123 // same as the input type. 2124 if (Info.hasMatchingInput()) { 2125 unsigned InputNo; 2126 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) { 2127 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo]; 2128 if (Input.hasTiedOperand() && Input.getTiedOperand() == i) 2129 break; 2130 } 2131 assert(InputNo != S.getNumInputs() && "Didn't find matching input!"); 2132 2133 QualType InputTy = S.getInputExpr(InputNo)->getType(); 2134 QualType OutputType = OutExpr->getType(); 2135 2136 uint64_t InputSize = getContext().getTypeSize(InputTy); 2137 if (getContext().getTypeSize(OutputType) < InputSize) { 2138 // Form the asm to return the value as a larger integer or fp type. 2139 ResultRegTypes.back() = ConvertType(InputTy); 2140 } 2141 } 2142 if (llvm::Type* AdjTy = 2143 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, 2144 ResultRegTypes.back())) 2145 ResultRegTypes.back() = AdjTy; 2146 else { 2147 CGM.getDiags().Report(S.getAsmLoc(), 2148 diag::err_asm_invalid_type_in_input) 2149 << OutExpr->getType() << OutputConstraint; 2150 } 2151 2152 // Update largest vector width for any vector types. 2153 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back())) 2154 LargestVectorWidth = 2155 std::max((uint64_t)LargestVectorWidth, 2156 VT->getPrimitiveSizeInBits().getKnownMinSize()); 2157 } else { 2158 ArgTypes.push_back(Dest.getAddress(*this).getType()); 2159 Args.push_back(Dest.getPointer(*this)); 2160 Constraints += "=*"; 2161 Constraints += OutputConstraint; 2162 ReadOnly = ReadNone = false; 2163 } 2164 2165 if (Info.isReadWrite()) { 2166 InOutConstraints += ','; 2167 2168 const Expr *InputExpr = S.getOutputExpr(i); 2169 llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(), 2170 InOutConstraints, 2171 InputExpr->getExprLoc()); 2172 2173 if (llvm::Type* AdjTy = 2174 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, 2175 Arg->getType())) 2176 Arg = Builder.CreateBitCast(Arg, AdjTy); 2177 2178 // Update largest vector width for any vector types. 2179 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType())) 2180 LargestVectorWidth = 2181 std::max((uint64_t)LargestVectorWidth, 2182 VT->getPrimitiveSizeInBits().getKnownMinSize()); 2183 // Don't tie physregs. 
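      // For a plain read-write operand such as "+r"(v) (illustrative), the
      // extra input operand is tied to its output by index (e.g. "0");
      // otherwise the output constraint itself is repeated.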
2184 if (Info.allowsRegister() && GCCReg.empty()) 2185 InOutConstraints += llvm::utostr(i); 2186 else 2187 InOutConstraints += OutputConstraint; 2188 2189 InOutArgTypes.push_back(Arg->getType()); 2190 InOutArgs.push_back(Arg); 2191 } 2192 } 2193 2194 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX) 2195 // to the return value slot. Only do this when returning in registers. 2196 if (isa<MSAsmStmt>(&S)) { 2197 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); 2198 if (RetAI.isDirect() || RetAI.isExtend()) { 2199 // Make a fake lvalue for the return value slot. 2200 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy); 2201 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs( 2202 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes, 2203 ResultRegDests, AsmString, S.getNumOutputs()); 2204 SawAsmBlock = true; 2205 } 2206 } 2207 2208 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { 2209 const Expr *InputExpr = S.getInputExpr(i); 2210 2211 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; 2212 2213 if (Info.allowsMemory()) 2214 ReadNone = false; 2215 2216 if (!Constraints.empty()) 2217 Constraints += ','; 2218 2219 // Simplify the input constraint. 2220 std::string InputConstraint(S.getInputConstraint(i)); 2221 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), 2222 &OutputConstraintInfos); 2223 2224 InputConstraint = AddVariableConstraints( 2225 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), 2226 getTarget(), CGM, S, false /* No EarlyClobber */); 2227 2228 std::string ReplaceConstraint (InputConstraint); 2229 llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints); 2230 2231 // If this input argument is tied to a larger output result, extend the 2232 // input to be the same size as the output. The LLVM backend wants to see 2233 // the input and output of a matching constraint be the same size. Note 2234 // that GCC does not define what the top bits are here. We use zext because 2235 // that is usually cheaper, but LLVM IR should really get an anyext someday. 2236 if (Info.hasTiedOperand()) { 2237 unsigned Output = Info.getTiedOperand(); 2238 QualType OutputType = S.getOutputExpr(Output)->getType(); 2239 QualType InputTy = InputExpr->getType(); 2240 2241 if (getContext().getTypeSize(OutputType) > 2242 getContext().getTypeSize(InputTy)) { 2243 // Use ptrtoint as appropriate so that we can do our extension. 2244 if (isa<llvm::PointerType>(Arg->getType())) 2245 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy); 2246 llvm::Type *OutputTy = ConvertType(OutputType); 2247 if (isa<llvm::IntegerType>(OutputTy)) 2248 Arg = Builder.CreateZExt(Arg, OutputTy); 2249 else if (isa<llvm::PointerType>(OutputTy)) 2250 Arg = Builder.CreateZExt(Arg, IntPtrTy); 2251 else { 2252 assert(OutputTy->isFloatingPointTy() && "Unexpected output type"); 2253 Arg = Builder.CreateFPExt(Arg, OutputTy); 2254 } 2255 } 2256 // Deal with the tied operands' constraint code in adjustInlineAsmType. 2257 ReplaceConstraint = OutputConstraints[Output]; 2258 } 2259 if (llvm::Type* AdjTy = 2260 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint, 2261 Arg->getType())) 2262 Arg = Builder.CreateBitCast(Arg, AdjTy); 2263 else 2264 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) 2265 << InputExpr->getType() << InputConstraint; 2266 2267 // Update largest vector width for any vector types. 
2268 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType())) 2269 LargestVectorWidth = 2270 std::max((uint64_t)LargestVectorWidth, 2271 VT->getPrimitiveSizeInBits().getKnownMinSize()); 2272 2273 ArgTypes.push_back(Arg->getType()); 2274 Args.push_back(Arg); 2275 Constraints += InputConstraint; 2276 } 2277 2278 // Labels 2279 SmallVector<llvm::BasicBlock *, 16> Transfer; 2280 llvm::BasicBlock *Fallthrough = nullptr; 2281 bool IsGCCAsmGoto = false; 2282 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) { 2283 IsGCCAsmGoto = GS->isAsmGoto(); 2284 if (IsGCCAsmGoto) { 2285 for (const auto *E : GS->labels()) { 2286 JumpDest Dest = getJumpDestForLabel(E->getLabel()); 2287 Transfer.push_back(Dest.getBlock()); 2288 llvm::BlockAddress *BA = 2289 llvm::BlockAddress::get(CurFn, Dest.getBlock()); 2290 Args.push_back(BA); 2291 ArgTypes.push_back(BA->getType()); 2292 if (!Constraints.empty()) 2293 Constraints += ','; 2294 Constraints += 'X'; 2295 } 2296 Fallthrough = createBasicBlock("asm.fallthrough"); 2297 } 2298 } 2299 2300 // Append the "input" part of inout constraints last. 2301 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { 2302 ArgTypes.push_back(InOutArgTypes[i]); 2303 Args.push_back(InOutArgs[i]); 2304 } 2305 Constraints += InOutConstraints; 2306 2307 // Clobbers 2308 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { 2309 StringRef Clobber = S.getClobber(i); 2310 2311 if (Clobber == "memory") 2312 ReadOnly = ReadNone = false; 2313 else if (Clobber != "cc") { 2314 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber); 2315 if (CGM.getCodeGenOpts().StackClashProtector && 2316 getTarget().isSPRegName(Clobber)) { 2317 CGM.getDiags().Report(S.getAsmLoc(), 2318 diag::warn_stack_clash_protection_inline_asm); 2319 } 2320 } 2321 2322 if (!Constraints.empty()) 2323 Constraints += ','; 2324 2325 Constraints += "~{"; 2326 Constraints += Clobber; 2327 Constraints += '}'; 2328 } 2329 2330 // Add machine specific clobbers 2331 std::string MachineClobbers = getTarget().getClobbers(); 2332 if (!MachineClobbers.empty()) { 2333 if (!Constraints.empty()) 2334 Constraints += ','; 2335 Constraints += MachineClobbers; 2336 } 2337 2338 llvm::Type *ResultType; 2339 if (ResultRegTypes.empty()) 2340 ResultType = VoidTy; 2341 else if (ResultRegTypes.size() == 1) 2342 ResultType = ResultRegTypes[0]; 2343 else 2344 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes); 2345 2346 llvm::FunctionType *FTy = 2347 llvm::FunctionType::get(ResultType, ArgTypes, false); 2348 2349 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; 2350 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ? 
2351 llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT; 2352 llvm::InlineAsm *IA = 2353 llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect, 2354 /* IsAlignStack */ false, AsmDialect); 2355 std::vector<llvm::Value*> RegResults; 2356 if (IsGCCAsmGoto) { 2357 llvm::CallBrInst *Result = 2358 Builder.CreateCallBr(IA, Fallthrough, Transfer, Args); 2359 EmitBlock(Fallthrough); 2360 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly, 2361 ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes, 2362 *this, RegResults); 2363 } else { 2364 llvm::CallInst *Result = 2365 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA)); 2366 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly, 2367 ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes, 2368 *this, RegResults); 2369 } 2370 2371 assert(RegResults.size() == ResultRegTypes.size()); 2372 assert(RegResults.size() == ResultTruncRegTypes.size()); 2373 assert(RegResults.size() == ResultRegDests.size()); 2374 // ResultRegDests can be also populated by addReturnRegisterOutputs() above, 2375 // in which case its size may grow. 2376 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size()); 2377 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) { 2378 llvm::Value *Tmp = RegResults[i]; 2379 2380 // If the result type of the LLVM IR asm doesn't match the result type of 2381 // the expression, do the conversion. 2382 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) { 2383 llvm::Type *TruncTy = ResultTruncRegTypes[i]; 2384 2385 // Truncate the integer result to the right size, note that TruncTy can be 2386 // a pointer. 2387 if (TruncTy->isFloatingPointTy()) 2388 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy); 2389 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) { 2390 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy); 2391 Tmp = Builder.CreateTrunc(Tmp, 2392 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize)); 2393 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy); 2394 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) { 2395 uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType()); 2396 Tmp = Builder.CreatePtrToInt(Tmp, 2397 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize)); 2398 Tmp = Builder.CreateTrunc(Tmp, TruncTy); 2399 } else if (TruncTy->isIntegerTy()) { 2400 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy); 2401 } else if (TruncTy->isVectorTy()) { 2402 Tmp = Builder.CreateBitCast(Tmp, TruncTy); 2403 } 2404 } 2405 2406 LValue Dest = ResultRegDests[i]; 2407 // ResultTypeRequiresCast elements correspond to the first 2408 // ResultTypeRequiresCast.size() elements of RegResults. 
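    // Those entries are the register-allocated aggregate outputs that came
    // back as plain integers and must be stored through a suitably cast
    // pointer to the real destination.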
2409 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) { 2410 unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]); 2411 Address A = Builder.CreateBitCast(Dest.getAddress(*this), 2412 ResultRegTypes[i]->getPointerTo()); 2413 QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false); 2414 if (Ty.isNull()) { 2415 const Expr *OutExpr = S.getOutputExpr(i); 2416 CGM.Error( 2417 OutExpr->getExprLoc(), 2418 "impossible constraint in asm: can't store value into a register"); 2419 return; 2420 } 2421 Dest = MakeAddrLValue(A, Ty); 2422 } 2423 EmitStoreThroughLValue(RValue::get(Tmp), Dest); 2424 } 2425 } 2426 2427 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) { 2428 const RecordDecl *RD = S.getCapturedRecordDecl(); 2429 QualType RecordTy = getContext().getRecordType(RD); 2430 2431 // Initialize the captured struct. 2432 LValue SlotLV = 2433 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy); 2434 2435 RecordDecl::field_iterator CurField = RD->field_begin(); 2436 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(), 2437 E = S.capture_init_end(); 2438 I != E; ++I, ++CurField) { 2439 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField); 2440 if (CurField->hasCapturedVLAType()) { 2441 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV); 2442 } else { 2443 EmitInitializerForField(*CurField, LV, *I); 2444 } 2445 } 2446 2447 return SlotLV; 2448 } 2449 2450 /// Generate an outlined function for the body of a CapturedStmt, store any 2451 /// captured variables into the captured struct, and call the outlined function. 2452 llvm::Function * 2453 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) { 2454 LValue CapStruct = InitCapturedStruct(S); 2455 2456 // Emit the CapturedDecl 2457 CodeGenFunction CGF(CGM, true); 2458 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K)); 2459 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S); 2460 delete CGF.CapturedStmtInfo; 2461 2462 // Emit call to the helper function. 2463 EmitCallOrInvoke(F, CapStruct.getPointer(*this)); 2464 2465 return F; 2466 } 2467 2468 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) { 2469 LValue CapStruct = InitCapturedStruct(S); 2470 return CapStruct.getAddress(*this); 2471 } 2472 2473 /// Creates the outlined function for a CapturedStmt. 2474 llvm::Function * 2475 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) { 2476 assert(CapturedStmtInfo && 2477 "CapturedStmtInfo should be set when generating the captured function"); 2478 const CapturedDecl *CD = S.getCapturedDecl(); 2479 const RecordDecl *RD = S.getCapturedRecordDecl(); 2480 SourceLocation Loc = S.getBeginLoc(); 2481 assert(CD->hasBody() && "missing CapturedDecl body"); 2482 2483 // Build the argument list. 2484 ASTContext &Ctx = CGM.getContext(); 2485 FunctionArgList Args; 2486 Args.append(CD->param_begin(), CD->param_end()); 2487 2488 // Create the function declaration. 2489 const CGFunctionInfo &FuncInfo = 2490 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args); 2491 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo); 2492 2493 llvm::Function *F = 2494 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage, 2495 CapturedStmtInfo->getHelperName(), &CGM.getModule()); 2496 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo); 2497 if (CD->isNothrow()) 2498 F->addFnAttr(llvm::Attribute::NoUnwind); 2499 2500 // Generate the function. 
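  // The helper is emitted like any other function: the CapturedStmt body
  // becomes its body, and captured variables are reached through the context
  // parameter loaded below.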
2501 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(), 2502 CD->getBody()->getBeginLoc()); 2503 // Set the context parameter in CapturedStmtInfo. 2504 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam()); 2505 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr)); 2506 2507 // Initialize variable-length arrays. 2508 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(), 2509 Ctx.getTagDeclType(RD)); 2510 for (auto *FD : RD->fields()) { 2511 if (FD->hasCapturedVLAType()) { 2512 auto *ExprArg = 2513 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc()) 2514 .getScalarVal(); 2515 auto VAT = FD->getCapturedVLAType(); 2516 VLASizeMap[VAT->getSizeExpr()] = ExprArg; 2517 } 2518 } 2519 2520 // If 'this' is captured, load it into CXXThisValue. 2521 if (CapturedStmtInfo->isCXXThisExprCaptured()) { 2522 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl(); 2523 LValue ThisLValue = EmitLValueForField(Base, FD); 2524 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal(); 2525 } 2526 2527 PGO.assignRegionCounters(GlobalDecl(CD), F); 2528 CapturedStmtInfo->EmitBody(*this, CD->getBody()); 2529 FinishFunction(CD->getBodyRBrace()); 2530 2531 return F; 2532 } 2533