//==- CoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a generic engine for intraprocedural, path-sensitive,
// dataflow analysis via graph reachability.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h" // llvm::make_unique
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
#include <algorithm> // std::min
#include <deque>

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "CoreEngine"

STATISTIC(NumSteps,
          "The # of steps executed.");
STATISTIC(NumReachedMaxSteps,
          "The # of times we reached the max number of steps.");
STATISTIC(NumPathsExplored,
          "The # of paths explored by the analyzer.");

STATISTIC(MaxQueueSize, "Maximum size of the worklist");
STATISTIC(MaxReachableSize, "Maximum size of auxiliary worklist set");

//===----------------------------------------------------------------------===//
// Worklist classes for exploration of reachable states.
//===----------------------------------------------------------------------===//

namespace {
class DFS : public WorkList {
  SmallVector<WorkListUnit, 20> Stack;
public:
  bool hasWork() const override {
    return !Stack.empty();
  }

  void enqueue(const WorkListUnit &U) override {
    Stack.push_back(U);
  }

  WorkListUnit dequeue() override {
    assert(!Stack.empty());
    // Copy the unit before popping; a reference would dangle once pop_back()
    // destroys the element.
    WorkListUnit U = Stack.back();
    Stack.pop_back();
    return U;
  }
};

class BFS : public WorkList {
  std::deque<WorkListUnit> Queue;
public:
  bool hasWork() const override {
    return !Queue.empty();
  }

  void enqueue(const WorkListUnit &U) override {
    Queue.push_back(U);
  }

  WorkListUnit dequeue() override {
    WorkListUnit U = Queue.front();
    Queue.pop_front();
    return U;
  }
};

} // end anonymous namespace

// Place the destructor for WorkList here because the class contains virtual
// member functions, and we want the code for the destructor emitted in a
// single translation unit.
WorkList::~WorkList() {}

std::unique_ptr<WorkList> WorkList::makeDFS() {
  return llvm::make_unique<DFS>();
}

std::unique_ptr<WorkList> WorkList::makeBFS() {
  return llvm::make_unique<BFS>();
}

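// A worklist that processes the contents of the current basic block (all
// non-BlockEntrance program points, kept on a stack) to completion before
// picking the next block entrance off the deque.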
namespace {
class BFSBlockDFSContents : public WorkList {
  std::deque<WorkListUnit> Queue;
  SmallVector<WorkListUnit, 20> Stack;
public:
  bool hasWork() const override {
    return !Queue.empty() || !Stack.empty();
  }

  void enqueue(const WorkListUnit &U) override {
    if (U.getNode()->getLocation().getAs<BlockEntrance>())
      Queue.push_front(U);
    else
      Stack.push_back(U);
  }

  WorkListUnit dequeue() override {
    // Process all basic blocks to completion.
    if (!Stack.empty()) {
      // Copy the unit before popping; a reference would dangle once
      // pop_back() destroys the element.
      WorkListUnit U = Stack.back();
      Stack.pop_back();
      return U;
    }

    assert(!Queue.empty());
    // Don't use a const reference here; the subsequent pop_front() would make
    // it unsafe.
    WorkListUnit U = Queue.front();
    Queue.pop_front();
    return U;
  }
};
} // end anonymous namespace

std::unique_ptr<WorkList> WorkList::makeBFSBlockDFSContents() {
  return llvm::make_unique<BFSBlockDFSContents>();
}

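// A DFS-based worklist that prefers nodes entering a (block, stack frame)
// pair that has not been reached before; nodes for already-reached locations
// wait on a second stack until the unexplored ones are drained.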
namespace {
class UnexploredFirstStack : public WorkList {
  /// Stack of nodes known to have statements we have not traversed yet.
  SmallVector<WorkListUnit, 20> StackUnexplored;

  /// Stack of all other nodes.
  SmallVector<WorkListUnit, 20> StackOthers;

  typedef unsigned BlockID;
  typedef std::pair<BlockID, const StackFrameContext *> LocIdentifier;
  llvm::DenseSet<LocIdentifier> Reachable;

public:
  bool hasWork() const override {
    return !(StackUnexplored.empty() && StackOthers.empty());
  }

  void enqueue(const WorkListUnit &U) override {
    const ExplodedNode *N = U.getNode();
    auto BE = N->getLocation().getAs<BlockEntrance>();

    if (!BE) {
      // Assume the choice of the order of the preceding block entrance was
      // correct.
      StackUnexplored.push_back(U);
    } else {
      LocIdentifier LocId = std::make_pair(
          BE->getBlock()->getBlockID(), N->getStackFrame());
      auto InsertInfo = Reachable.insert(LocId);

      if (InsertInfo.second) {
        StackUnexplored.push_back(U);
      } else {
        StackOthers.push_back(U);
      }
    }
    MaxReachableSize.updateMax(Reachable.size());
    MaxQueueSize.updateMax(StackUnexplored.size() + StackOthers.size());
  }

  WorkListUnit dequeue() override {
    // Copy the unit before popping; a reference would dangle once pop_back()
    // destroys the element.
    if (!StackUnexplored.empty()) {
      WorkListUnit U = StackUnexplored.back();
      StackUnexplored.pop_back();
      return U;
    } else {
      WorkListUnit U = StackOthers.back();
      StackOthers.pop_back();
      return U;
    }
  }
};
} // end anonymous namespace

std::unique_ptr<WorkList> WorkList::makeUnexploredFirst() {
  return llvm::make_unique<UnexploredFirstStack>();
}

//===----------------------------------------------------------------------===//
// Core analysis engine.
//===----------------------------------------------------------------------===//

static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
  switch (Opts.getExplorationStrategy()) {
    case AnalyzerOptions::ExplorationStrategyKind::DFS:
      return WorkList::makeDFS();
    case AnalyzerOptions::ExplorationStrategyKind::BFS:
      return WorkList::makeBFS();
    case AnalyzerOptions::ExplorationStrategyKind::BFSBlockDFSContents:
      return WorkList::makeBFSBlockDFSContents();
    case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirst:
      return WorkList::makeUnexploredFirst();
    default:
      llvm_unreachable("Unexpected case");
  }
}

CoreEngine::CoreEngine(SubEngine &subengine,
                       FunctionSummariesTy *FS,
                       AnalyzerOptions &Opts)
    : SubEng(subengine), WList(generateWorkList(Opts)),
      BCounterFactory(G.getAllocator()), FunctionSummaries(FS) {}

/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
                                 ProgramStateRef InitState) {
  if (G.num_roots() == 0) { // Initialize the analysis by constructing
                            // the root if none exists.

    const CFGBlock *Entry = &(L->getCFG()->getEntry());

    assert(Entry->empty() &&
           "Entry block must be empty.");

    assert(Entry->succ_size() == 1 &&
           "Entry block must have 1 successor.");

    // Mark the entry block as visited.
    FunctionSummaries->markVisitedBasicBlock(Entry->getBlockID(),
                                             L->getDecl(),
                                             L->getCFG()->getNumBlockIDs());

    // Get the solitary successor.
    const CFGBlock *Succ = *(Entry->succ_begin());

    // Construct an edge representing the
    // starting location in the function.
    BlockEdge StartLoc(Entry, Succ, L);

    // Set the current block counter to be empty.
    WList->setBlockCounter(BCounterFactory.GetEmptyCounter());

    if (!InitState)
      InitState = SubEng.getInitialState(L);

    bool IsNew;
    ExplodedNode *Node = G.getNode(StartLoc, InitState, false, &IsNew);
    assert(IsNew);
    G.addRoot(Node);

    NodeBuilderContext BuilderCtx(*this, StartLoc.getDst(), Node);
    ExplodedNodeSet DstBegin;
    SubEng.processBeginOfFunction(BuilderCtx, Node, DstBegin, StartLoc);

    enqueue(DstBegin);
  }

  // Check if we have a step limit.
  bool UnlimitedSteps = Steps == 0;
  // Cap our pre-reservation in the event that the user specifies
  // a very large number of maximum steps.
  const unsigned PreReservationCap = 4000000;
  if (!UnlimitedSteps)
    G.reserve(std::min(Steps, PreReservationCap));

  while (WList->hasWork()) {
    if (!UnlimitedSteps) {
      if (Steps == 0) {
        NumReachedMaxSteps++;
        break;
      }
      --Steps;
    }

    NumSteps++;

    const WorkListUnit &WU = WList->dequeue();

    // Set the current block counter.
    WList->setBlockCounter(WU.getBlockCounter());

    // Retrieve the node.
    ExplodedNode *Node = WU.getNode();

    dispatchWorkItem(Node, Node->getLocation(), WU);
  }
  SubEng.processEndWorklist(hasWorkRemaining());
  return WList->hasWork();
}

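/// Dispatch a work item to the handler that corresponds to the kind of its
/// program point.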
void CoreEngine::dispatchWorkItem(ExplodedNode *Pred, ProgramPoint Loc,
                                  const WorkListUnit &WU) {
  // Dispatch on the location type.
  switch (Loc.getKind()) {
    case ProgramPoint::BlockEdgeKind:
      HandleBlockEdge(Loc.castAs<BlockEdge>(), Pred);
      break;

    case ProgramPoint::BlockEntranceKind:
      HandleBlockEntrance(Loc.castAs<BlockEntrance>(), Pred);
      break;

    case ProgramPoint::BlockExitKind:
      assert(false && "BlockExit locations never occur in forward analysis.");
      break;

    case ProgramPoint::CallEnterKind:
      HandleCallEnter(Loc.castAs<CallEnter>(), Pred);
      break;

    case ProgramPoint::CallExitBeginKind:
      SubEng.processCallExit(Pred);
      break;

    case ProgramPoint::EpsilonKind: {
      assert(Pred->hasSinglePred() &&
             "Assume epsilon has exactly one predecessor by construction");
      ExplodedNode *PNode = Pred->getFirstPred();
      dispatchWorkItem(Pred, PNode->getLocation(), WU);
      break;
    }
    default:
      assert(Loc.getAs<PostStmt>() ||
             Loc.getAs<PostInitializer>() ||
             Loc.getAs<PostImplicitCall>() ||
             Loc.getAs<CallExitEnd>() ||
             Loc.getAs<LoopExit>() ||
             Loc.getAs<PostAllocatorCall>());
      HandlePostStmt(WU.getBlock(), WU.getIndex(), Pred);
      break;
  }
}

bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
                                                 unsigned Steps,
                                                 ProgramStateRef InitState,
                                                 ExplodedNodeSet &Dst) {
  bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
  for (ExplodedGraph::eop_iterator I = G.eop_begin(), E = G.eop_end(); I != E;
       ++I) {
    Dst.Add(*I);
  }
  return DidNotFinish;
}

void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
  const CFGBlock *Blk = L.getDst();
  NodeBuilderContext BuilderCtx(*this, Blk, Pred);

  // Mark this block as visited.
  const LocationContext *LC = Pred->getLocationContext();
  FunctionSummaries->markVisitedBasicBlock(Blk->getBlockID(),
                                           LC->getDecl(),
                                           LC->getCFG()->getNumBlockIDs());

  // Check if we are entering the EXIT block.
  if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {
    assert(L.getLocationContext()->getCFG()->getExit().size() == 0 &&
           "EXIT block cannot contain Stmts.");

    // Get the return statement, if any.
    const ReturnStmt *RS = nullptr;
    if (!L.getSrc()->empty()) {
      if (Optional<CFGStmt> LastStmt = L.getSrc()->back().getAs<CFGStmt>()) {
        RS = dyn_cast<ReturnStmt>(LastStmt->getStmt());
      }
    }

    // Process the final state transition.
    SubEng.processEndOfFunction(BuilderCtx, Pred, RS);

    // This path is done. Don't enqueue any more nodes.
    return;
  }

  // Call into the SubEngine to process entering the CFGBlock.
  ExplodedNodeSet dstNodes;
  BlockEntrance BE(Blk, Pred->getLocationContext());
  NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
  SubEng.processCFGBlockEntrance(L, nodeBuilder, Pred);

  // Auto-generate a node.
  if (!nodeBuilder.hasGeneratedNodes()) {
    nodeBuilder.generateNode(Pred->State, Pred);
  }

  // Enqueue nodes onto the worklist.
  enqueue(dstNodes);
}

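// HandleBlockEntrance - Record another visit of this block within the current
// stack frame (the block counter is what lets the SubEngine enforce the
// analyzer's block-visit limit), then process the block's first CFG element,
// if any.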
void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
                                     ExplodedNode *Pred) {
  // Increment the block counter.
  const LocationContext *LC = Pred->getLocationContext();
  unsigned BlockId = L.getBlock()->getBlockID();
  BlockCounter Counter = WList->getBlockCounter();
  Counter = BCounterFactory.IncrementCount(Counter, LC->getCurrentStackFrame(),
                                           BlockId);
  WList->setBlockCounter(Counter);

  // Process the entrance of the block.
  if (Optional<CFGElement> E = L.getFirstElement()) {
    NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
    SubEng.processCFGElement(*E, Pred, 0, &Ctx);
  }
  else
    HandleBlockExit(L.getBlock(), Pred);
}

void CoreEngine::HandleBlockExit(const CFGBlock *B, ExplodedNode *Pred) {
  if (const Stmt *Term = B->getTerminator()) {
    switch (Term->getStmtClass()) {
      default:
        llvm_unreachable("Analysis for this terminator not implemented.");

      case Stmt::CXXBindTemporaryExprClass:
        HandleCleanupTemporaryBranch(
            cast<CXXBindTemporaryExpr>(B->getTerminator().getStmt()), B, Pred);
        return;

      // Model static initializers.
      case Stmt::DeclStmtClass:
        HandleStaticInit(cast<DeclStmt>(Term), B, Pred);
        return;

      case Stmt::BinaryOperatorClass: // '&&' and '||'
        HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
        return;

      case Stmt::BinaryConditionalOperatorClass:
      case Stmt::ConditionalOperatorClass:
        HandleBranch(cast<AbstractConditionalOperator>(Term)->getCond(),
                     Term, B, Pred);
        return;

      // FIXME: Use constant-folding in CFG construction to simplify this
      // case.

      case Stmt::ChooseExprClass:
        HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::CXXTryStmtClass: {
        // Generate a node for each of the successors.
        // Our logic for EH analysis can certainly be improved.
        for (CFGBlock::const_succ_iterator it = B->succ_begin(),
             et = B->succ_end(); it != et; ++it) {
          if (const CFGBlock *succ = *it) {
            generateNode(BlockEdge(B, succ, Pred->getLocationContext()),
                         Pred->State, Pred);
          }
        }
        return;
      }

      case Stmt::DoStmtClass:
        HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::CXXForRangeStmtClass:
        HandleBranch(cast<CXXForRangeStmt>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::ForStmtClass:
        HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::ContinueStmtClass:
      case Stmt::BreakStmtClass:
      case Stmt::GotoStmtClass:
        break;

      case Stmt::IfStmtClass:
        HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::IndirectGotoStmtClass: {
        // Only 1 successor: the indirect goto dispatch block.
        assert(B->succ_size() == 1);

        IndirectGotoNodeBuilder
          builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
                  *(B->succ_begin()), this);

        SubEng.processIndirectGoto(builder);
        return;
      }

      case Stmt::ObjCForCollectionStmtClass: {
        // An ObjCForCollectionStmt appears twice in a CFG:
        //
        //  (1) inside a basic block, which represents the binding of the
        //      'element' variable to a value.
        //  (2) in a terminator, which represents the branch.
        //
        // For (1), subengines will bind a value (i.e., 0 or 1) indicating
        // whether or not the collection contains any more elements. We cannot
        // just test to see if the element is nil because a container can
        // contain nil elements.
        HandleBranch(Term, Term, B, Pred);
        return;
      }

      case Stmt::SwitchStmtClass: {
        SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
                                  this);

        SubEng.processSwitch(builder);
        return;
      }

      case Stmt::WhileStmtClass:
        HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
        return;
    }
  }

  assert(B->succ_size() == 1 &&
         "Blocks with no terminator should have exactly 1 successor.");

  generateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
               Pred->State, Pred);
}

void CoreEngine::HandleCallEnter(const CallEnter &CE, ExplodedNode *Pred) {
  NodeBuilderContext BuilderCtx(*this, CE.getEntry(), Pred);
  SubEng.processCallEnter(BuilderCtx, CE, Pred);
}

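// Each of the branch handlers below expects a two-way branch; the CFG lists
// the 'true' destination first (succ_begin()) and the 'false' destination
// second (succ_begin() + 1).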
void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
                              const CFGBlock *B, ExplodedNode *Pred) {
  assert(B->succ_size() == 2);
  NodeBuilderContext Ctx(*this, B, Pred);
  ExplodedNodeSet Dst;
  SubEng.processBranch(Cond, Term, Ctx, Pred, Dst,
                       *(B->succ_begin()), *(B->succ_begin() + 1));
  // Enqueue the new frontier onto the worklist.
  enqueue(Dst);
}

void CoreEngine::HandleCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
                                              const CFGBlock *B,
                                              ExplodedNode *Pred) {
  assert(B->succ_size() == 2);
  NodeBuilderContext Ctx(*this, B, Pred);
  ExplodedNodeSet Dst;
  SubEng.processCleanupTemporaryBranch(BTE, Ctx, Pred, Dst, *(B->succ_begin()),
                                       *(B->succ_begin() + 1));
  // Enqueue the new frontier onto the worklist.
  enqueue(Dst);
}

void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
                                  ExplodedNode *Pred) {
  assert(B->succ_size() == 2);
  NodeBuilderContext Ctx(*this, B, Pred);
  ExplodedNodeSet Dst;
  SubEng.processStaticInitializer(DS, Ctx, Pred, Dst,
                                  *(B->succ_begin()), *(B->succ_begin() + 1));
  // Enqueue the new frontier onto the worklist.
  enqueue(Dst);
}

void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
                                ExplodedNode *Pred) {
  assert(B);
  assert(!B->empty());

  if (StmtIdx == B->size())
    HandleBlockExit(B, Pred);
  else {
    NodeBuilderContext Ctx(*this, B, Pred);
    SubEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
  }
}

/// generateNode - Utility method to generate nodes, hook up successors,
/// and add nodes to the worklist.
void CoreEngine::generateNode(const ProgramPoint &Loc,
                              ProgramStateRef State,
                              ExplodedNode *Pred) {
  bool IsNew;
  ExplodedNode *Node = G.getNode(Loc, State, false, &IsNew);

  if (Pred)
    Node->addPredecessor(Pred, G); // Link 'Node' with its predecessor.
  else {
    assert(IsNew);
    G.addRoot(Node); // 'Node' has no predecessor. Make it a root.
  }

  // Only add 'Node' to the worklist if it was freshly generated.
  if (IsNew) WList->enqueue(Node);
}

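// enqueueStmtNode - Enqueue a node at the next CFG element. For program
// points that do not introduce a new statement node (e.g. PostInitializer or
// LoopExit), only the element index is advanced.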
void CoreEngine::enqueueStmtNode(ExplodedNode *N,
                                 const CFGBlock *Block, unsigned Idx) {
  assert(Block);
  assert(!N->isSink());

  // Check if this node entered a callee.
  if (N->getLocation().getAs<CallEnter>()) {
    // Still use the index of the CallExpr. It's needed to create the callee
    // StackFrameContext.
    WList->enqueue(N, Block, Idx);
    return;
  }

  // Do not create extra nodes. Move to the next CFG element.
  if (N->getLocation().getAs<PostInitializer>() ||
      N->getLocation().getAs<PostImplicitCall>() ||
      N->getLocation().getAs<LoopExit>()) {
    WList->enqueue(N, Block, Idx + 1);
    return;
  }

  if (N->getLocation().getAs<EpsilonPoint>()) {
    WList->enqueue(N, Block, Idx);
    return;
  }

  if ((*Block)[Idx].getKind() == CFGElement::NewAllocator) {
    WList->enqueue(N, Block, Idx + 1);
    return;
  }

  // At this point, we know we're processing a normal statement.
  CFGStmt CS = (*Block)[Idx].castAs<CFGStmt>();
  PostStmt Loc(CS.getStmt(), N->getLocationContext());

  if (Loc == N->getLocation().withTag(nullptr)) {
    // Note: 'N' should be a fresh node because otherwise it shouldn't be
    // a member of Deferred.
    WList->enqueue(N, Block, Idx + 1);
    return;
  }

  bool IsNew;
  ExplodedNode *Succ = G.getNode(Loc, N->getState(), false, &IsNew);
  Succ->addPredecessor(N, G);

  if (IsNew)
    WList->enqueue(Succ, Block, Idx + 1);
}

ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N,
                                                    const ReturnStmt *RS) {
  // Create a CallExitBegin node and enqueue it.
  const StackFrameContext *LocCtx
    = cast<StackFrameContext>(N->getLocationContext());

  // Use the callee location context.
  CallExitBegin Loc(LocCtx, RS);

  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, N->getState(), false, &isNew);
  Node->addPredecessor(N, G);
  return isNew ? Node : nullptr;
}

void CoreEngine::enqueue(ExplodedNodeSet &Set) {
  for (ExplodedNodeSet::iterator I = Set.begin(),
       E = Set.end(); I != E; ++I) {
    WList->enqueue(*I);
  }
}

void CoreEngine::enqueue(ExplodedNodeSet &Set,
                         const CFGBlock *Block, unsigned Idx) {
  for (ExplodedNodeSet::iterator I = Set.begin(),
       E = Set.end(); I != E; ++I) {
    enqueueStmtNode(*I, Block, Idx);
  }
}

void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set,
                                      const ReturnStmt *RS) {
  for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
    ExplodedNode *N = *I;
    // If we are in an inlined call, generate a CallExitBegin node.
    if (N->getLocationContext()->getParent()) {
      N = generateCallExitBeginNode(N, RS);
      if (N)
        WList->enqueue(N);
    } else {
      // TODO: We should run remove-dead-bindings here.
      G.addEndOfPath(N);
      NumPathsExplored++;
    }
  }
}

void NodeBuilder::anchor() {}

ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
                                            ProgramStateRef State,
                                            ExplodedNode *FromN,
                                            bool MarkAsSink) {
  HasGeneratedNodes = true;
  bool IsNew;
  ExplodedNode *N = C.Eng.G.getNode(Loc, State, MarkAsSink, &IsNew);
  N->addPredecessor(FromN, C.Eng.G);
  Frontier.erase(FromN);

  if (!IsNew)
    return nullptr;

  if (!MarkAsSink)
    Frontier.Add(N);

  return N;
}

void NodeBuilderWithSinks::anchor() {}

StmtNodeBuilder::~StmtNodeBuilder() {
  if (EnclosingBldr)
    for (ExplodedNodeSet::iterator I = Frontier.begin(),
         E = Frontier.end(); I != E; ++I)
      EnclosingBldr->addNodes(*I);
}

void BranchNodeBuilder::anchor() {}

ExplodedNode *BranchNodeBuilder::generateNode(ProgramStateRef State,
                                              bool branch,
                                              ExplodedNode *NodePred) {
  // If the branch has been marked infeasible we should not generate a node.
  if (!isFeasible(branch))
    return nullptr;

  ProgramPoint Loc = BlockEdge(C.Block, branch ? DstT : DstF,
                               NodePred->getLocationContext());
  ExplodedNode *Succ = generateNodeImpl(Loc, State, NodePred);
  return Succ;
}

ExplodedNode*
IndirectGotoNodeBuilder::generateNode(const iterator &I,
                                      ProgramStateRef St,
                                      bool IsSink) {
  bool IsNew;
  ExplodedNode *Succ =
    Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
                  St, IsSink, &IsNew);
  Succ->addPredecessor(Pred, Eng.G);

  if (!IsNew)
    return nullptr;

  if (!IsSink)
    Eng.WList->enqueue(Succ);

  return Succ;
}

ExplodedNode*
SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
                                        ProgramStateRef St) {
  bool IsNew;
  ExplodedNode *Succ =
    Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
                  St, false, &IsNew);
  Succ->addPredecessor(Pred, Eng.G);
  if (!IsNew)
    return nullptr;

  Eng.WList->enqueue(Succ);
  return Succ;
}

ExplodedNode*
SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
                                           bool IsSink) {
  // Get the block for the default case.
  assert(Src->succ_rbegin() != Src->succ_rend());
  CFGBlock *DefaultBlock = *Src->succ_rbegin();

  // Sanity check for default blocks that are unreachable and not caught
  // by earlier stages.
  if (!DefaultBlock)
    return nullptr;

  bool IsNew;
  ExplodedNode *Succ =
    Eng.G.getNode(BlockEdge(Src, DefaultBlock, Pred->getLocationContext()),
                  St, IsSink, &IsNew);
  Succ->addPredecessor(Pred, Eng.G);

  if (!IsNew)
    return nullptr;

  if (!IsSink)
    Eng.WList->enqueue(Succ);

  return Succ;
}