1 //==- CoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines a generic engine for intraprocedural, path-sensitive, 11 // dataflow analysis via graph reachability engine. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h" 16 #include "clang/AST/Expr.h" 17 #include "clang/AST/ExprCXX.h" 18 #include "clang/AST/StmtCXX.h" 19 #include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h" 20 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" 21 #include "llvm/ADT/Statistic.h" 22 #include "llvm/ADT/DenseSet.h" 23 #include "llvm/ADT/DenseMap.h" 24 #include "llvm/Support/Casting.h" 25 #include "llvm/ADT/PriorityQueue.h" 26 27 using namespace clang; 28 using namespace ento; 29 30 #define DEBUG_TYPE "CoreEngine" 31 32 STATISTIC(NumSteps, 33 "The # of steps executed."); 34 STATISTIC(NumReachedMaxSteps, 35 "The # of times we reached the max number of steps."); 36 STATISTIC(NumPathsExplored, 37 "The # of paths explored by the analyzer."); 38 39 STATISTIC(MaxQueueSize, "Maximum size of the worklist"); 40 STATISTIC(MaxReachableSize, "Maximum size of auxiliary worklist set"); 41 42 //===----------------------------------------------------------------------===// 43 // Worklist classes for exploration of reachable states. 
44 //===----------------------------------------------------------------------===// 45 46 namespace { 47 class DFS : public WorkList { 48 SmallVector<WorkListUnit,20> Stack; 49 public: 50 bool hasWork() const override { 51 return !Stack.empty(); 52 } 53 54 void enqueue(const WorkListUnit& U) override { 55 Stack.push_back(U); 56 } 57 58 WorkListUnit dequeue() override { 59 assert (!Stack.empty()); 60 const WorkListUnit& U = Stack.back(); 61 Stack.pop_back(); // This technically "invalidates" U, but we are fine. 62 return U; 63 } 64 }; 65 66 class BFS : public WorkList { 67 std::deque<WorkListUnit> Queue; 68 public: 69 bool hasWork() const override { 70 return !Queue.empty(); 71 } 72 73 void enqueue(const WorkListUnit& U) override { 74 Queue.push_back(U); 75 } 76 77 WorkListUnit dequeue() override { 78 WorkListUnit U = Queue.front(); 79 Queue.pop_front(); 80 return U; 81 } 82 83 }; 84 85 } // end anonymous namespace 86 87 // Place the dstor for WorkList here because it contains virtual member 88 // functions, and we the code for the dstor generated in one compilation unit. 89 WorkList::~WorkList() {} 90 91 std::unique_ptr<WorkList> WorkList::makeDFS() { 92 return llvm::make_unique<DFS>(); 93 } 94 95 std::unique_ptr<WorkList> WorkList::makeBFS() { 96 return llvm::make_unique<BFS>(); 97 } 98 99 namespace { 100 class BFSBlockDFSContents : public WorkList { 101 std::deque<WorkListUnit> Queue; 102 SmallVector<WorkListUnit,20> Stack; 103 public: 104 bool hasWork() const override { 105 return !Queue.empty() || !Stack.empty(); 106 } 107 108 void enqueue(const WorkListUnit& U) override { 109 if (U.getNode()->getLocation().getAs<BlockEntrance>()) 110 Queue.push_front(U); 111 else 112 Stack.push_back(U); 113 } 114 115 WorkListUnit dequeue() override { 116 // Process all basic blocks to completion. 117 if (!Stack.empty()) { 118 const WorkListUnit& U = Stack.back(); 119 Stack.pop_back(); // This technically "invalidates" U, but we are fine. 
120 return U; 121 } 122 123 assert(!Queue.empty()); 124 // Don't use const reference. The subsequent pop_back() might make it 125 // unsafe. 126 WorkListUnit U = Queue.front(); 127 Queue.pop_front(); 128 return U; 129 } 130 }; 131 } // end anonymous namespace 132 133 std::unique_ptr<WorkList> WorkList::makeBFSBlockDFSContents() { 134 return llvm::make_unique<BFSBlockDFSContents>(); 135 } 136 137 namespace { 138 class UnexploredFirstStack : public WorkList { 139 140 /// Stack of nodes known to have statements we have not traversed yet. 141 SmallVector<WorkListUnit, 20> StackUnexplored; 142 143 /// Stack of all other nodes. 144 SmallVector<WorkListUnit, 20> StackOthers; 145 146 typedef unsigned BlockID; 147 typedef std::pair<BlockID, const StackFrameContext *> LocIdentifier; 148 llvm::DenseSet<LocIdentifier> Reachable; 149 150 public: 151 bool hasWork() const override { 152 return !(StackUnexplored.empty() && StackOthers.empty()); 153 } 154 155 void enqueue(const WorkListUnit &U) override { 156 const ExplodedNode *N = U.getNode(); 157 auto BE = N->getLocation().getAs<BlockEntrance>(); 158 159 if (!BE) { 160 161 // Assume the choice of the order of the preceeding block entrance was 162 // correct. 
163 StackUnexplored.push_back(U); 164 } else { 165 LocIdentifier LocId = std::make_pair( 166 BE->getBlock()->getBlockID(), N->getStackFrame()); 167 auto InsertInfo = Reachable.insert(LocId); 168 169 if (InsertInfo.second) { 170 StackUnexplored.push_back(U); 171 } else { 172 StackOthers.push_back(U); 173 } 174 } 175 MaxReachableSize.updateMax(Reachable.size()); 176 MaxQueueSize.updateMax(StackUnexplored.size() + StackOthers.size()); 177 } 178 179 WorkListUnit dequeue() override { 180 if (!StackUnexplored.empty()) { 181 WorkListUnit &U = StackUnexplored.back(); 182 StackUnexplored.pop_back(); 183 return U; 184 } else { 185 WorkListUnit &U = StackOthers.back(); 186 StackOthers.pop_back(); 187 return U; 188 } 189 } 190 }; 191 } // end anonymous namespace 192 193 std::unique_ptr<WorkList> WorkList::makeUnexploredFirst() { 194 return llvm::make_unique<UnexploredFirstStack>(); 195 } 196 197 class UnexploredFirstPriorityQueue : public WorkList { 198 typedef unsigned BlockID; 199 typedef std::pair<BlockID, const StackFrameContext *> LocIdentifier; 200 201 // How many times each location was visited. 202 // Is signed because we negate it later in order to have a reversed 203 // comparison. 204 typedef llvm::DenseMap<LocIdentifier, int> VisitedTimesMap; 205 206 // Compare by number of times the location was visited first (negated 207 // to prefer less often visited locations), then by insertion time (prefer 208 // expanding nodes inserted sooner first). 209 typedef std::pair<int, unsigned long> QueuePriority; 210 typedef std::pair<WorkListUnit, QueuePriority> QueueItem; 211 212 struct ExplorationComparator { 213 bool operator() (const QueueItem &LHS, const QueueItem &RHS) { 214 return LHS.second < RHS.second; 215 } 216 }; 217 218 // Number of inserted nodes, used to emulate DFS ordering in the priority 219 // queue when insertions are equal. 220 unsigned long Counter = 0; 221 222 // Number of times a current location was reached. 
223 VisitedTimesMap NumReached; 224 225 // The top item is the largest one. 226 llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, ExplorationComparator> 227 queue; 228 229 public: 230 bool hasWork() const override { 231 return !queue.empty(); 232 } 233 234 void enqueue(const WorkListUnit &U) override { 235 const ExplodedNode *N = U.getNode(); 236 unsigned NumVisited = 0; 237 if (auto BE = N->getLocation().getAs<BlockEntrance>()) { 238 LocIdentifier LocId = std::make_pair( 239 BE->getBlock()->getBlockID(), N->getStackFrame()); 240 NumVisited = NumReached[LocId]++; 241 } 242 243 queue.push(std::make_pair(U, std::make_pair(-NumVisited, ++Counter))); 244 } 245 246 WorkListUnit dequeue() override { 247 QueueItem U = queue.top(); 248 queue.pop(); 249 return U.first; 250 } 251 }; 252 253 std::unique_ptr<WorkList> WorkList::makeUnexploredFirstPriorityQueue() { 254 return llvm::make_unique<UnexploredFirstPriorityQueue>(); 255 } 256 257 //===----------------------------------------------------------------------===// 258 // Core analysis engine. 
//===----------------------------------------------------------------------===//

/// Construct the worklist implementation selected by the analyzer's
/// exploration-strategy option.
static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
  switch (Opts.getExplorationStrategy()) {
    case AnalyzerOptions::ExplorationStrategyKind::DFS:
      return WorkList::makeDFS();
    case AnalyzerOptions::ExplorationStrategyKind::BFS:
      return WorkList::makeBFS();
    case AnalyzerOptions::ExplorationStrategyKind::BFSBlockDFSContents:
      return WorkList::makeBFSBlockDFSContents();
    case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirst:
      return WorkList::makeUnexploredFirst();
    case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirstQueue:
      return WorkList::makeUnexploredFirstPriorityQueue();
    default:
      llvm_unreachable("Unexpected case");
  }
}

CoreEngine::CoreEngine(SubEngine &subengine,
                       FunctionSummariesTy *FS,
                       AnalyzerOptions &Opts) : SubEng(subengine),
                                                WList(generateWorkList(Opts)),
                                                BCounterFactory(G.getAllocator()),
                                                FunctionSummaries(FS) {}

/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
/// Steps == 0 means no step limit. Returns true if work remains on the
/// worklist when the loop exits (i.e. the analysis did not finish).
bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
                                 ProgramStateRef InitState) {

  if (G.num_roots() == 0) { // Initialize the analysis by constructing
    // the root if none exists.

    const CFGBlock *Entry = &(L->getCFG()->getEntry());

    assert (Entry->empty() &&
            "Entry block must be empty.");

    assert (Entry->succ_size() == 1 &&
            "Entry block must have 1 successor.");

    // Mark the entry block as visited.
    FunctionSummaries->markVisitedBasicBlock(Entry->getBlockID(),
                                             L->getDecl(),
                                             L->getCFG()->getNumBlockIDs());

    // Get the solitary successor.
    const CFGBlock *Succ = *(Entry->succ_begin());

    // Construct an edge representing the
    // starting location in the function.
    BlockEdge StartLoc(Entry, Succ, L);

    // Set the current block counter to being empty.
    WList->setBlockCounter(BCounterFactory.GetEmptyCounter());

    if (!InitState)
      InitState = SubEng.getInitialState(L);

    bool IsNew;
    ExplodedNode *Node = G.getNode(StartLoc, InitState, false, &IsNew);
    assert (IsNew);
    G.addRoot(Node);

    NodeBuilderContext BuilderCtx(*this, StartLoc.getDst(), Node);
    ExplodedNodeSet DstBegin;
    SubEng.processBeginOfFunction(BuilderCtx, Node, DstBegin, StartLoc);

    enqueue(DstBegin);
  }

  // Check if we have a steps limit
  bool UnlimitedSteps = Steps == 0;
  // Cap our pre-reservation in the event that the user specifies
  // a very large number of maximum steps.
  const unsigned PreReservationCap = 4000000;
  if(!UnlimitedSteps)
    G.reserve(std::min(Steps,PreReservationCap));

  while (WList->hasWork()) {
    if (!UnlimitedSteps) {
      if (Steps == 0) {
        // Step budget exhausted; record the cutoff and bail out.
        NumReachedMaxSteps++;
        break;
      }
      --Steps;
    }

    NumSteps++;

    // Lifetime of the temporary returned by dequeue() is extended by the
    // const reference.
    const WorkListUnit& WU = WList->dequeue();

    // Set the current block counter.
    WList->setBlockCounter(WU.getBlockCounter());

    // Retrieve the node.
    ExplodedNode *Node = WU.getNode();

    dispatchWorkItem(Node, Node->getLocation(), WU);
  }
  SubEng.processEndWorklist(hasWorkRemaining());
  return WList->hasWork();
}

/// Route a dequeued work item to the handler matching its program point kind.
void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
                                  const WorkListUnit& WU) {
  // Dispatch on the location type.
  switch (Loc.getKind()) {
    case ProgramPoint::BlockEdgeKind:
      HandleBlockEdge(Loc.castAs<BlockEdge>(), Pred);
      break;

    case ProgramPoint::BlockEntranceKind:
      HandleBlockEntrance(Loc.castAs<BlockEntrance>(), Pred);
      break;

    case ProgramPoint::BlockExitKind:
      assert (false && "BlockExit location never occur in forward analysis.");
      break;

    case ProgramPoint::CallEnterKind: {
      HandleCallEnter(Loc.castAs<CallEnter>(), Pred);
      break;
    }

    case ProgramPoint::CallExitBeginKind:
      SubEng.processCallExit(Pred);
      break;

    case ProgramPoint::EpsilonKind: {
      assert(Pred->hasSinglePred() &&
             "Assume epsilon has exactly one predecessor by construction");
      // Re-dispatch using the predecessor's location so the epsilon node is
      // processed as if it occurred at that point.
      ExplodedNode *PNode = Pred->getFirstPred();
      dispatchWorkItem(Pred, PNode->getLocation(), WU);
      break;
    }
    default:
      // All remaining kinds are statement-level points handled uniformly.
      assert(Loc.getAs<PostStmt>() ||
             Loc.getAs<PostInitializer>() ||
             Loc.getAs<PostImplicitCall>() ||
             Loc.getAs<CallExitEnd>() ||
             Loc.getAs<LoopExit>() ||
             Loc.getAs<PostAllocatorCall>());
      HandlePostStmt(WU.getBlock(), WU.getIndex(), Pred);
      break;
  }
}

/// Run the worklist and collect all end-of-path nodes into Dst.
/// Returns the result of ExecuteWorkList (true if work remained).
bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
                                                 unsigned Steps,
                                                 ProgramStateRef InitState,
                                                 ExplodedNodeSet &Dst) {
  bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
  for (ExplodedGraph::eop_iterator I = G.eop_begin(), E = G.eop_end(); I != E;
       ++I) {
    Dst.Add(*I);
  }
  return DidNotFinish;
}

/// Process a CFG edge: mark the destination block visited, detect function
/// exit, and otherwise hand block entrance over to the subengine.
void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {

  const CFGBlock *Blk = L.getDst();
  NodeBuilderContext BuilderCtx(*this, Blk, Pred);

  // Mark this block as visited.
  const LocationContext *LC = Pred->getLocationContext();
  FunctionSummaries->markVisitedBasicBlock(Blk->getBlockID(),
                                           LC->getDecl(),
                                           LC->getCFG()->getNumBlockIDs());

  // Check if we are entering the EXIT block.
  if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {

    assert (L.getLocationContext()->getCFG()->getExit().size() == 0
            && "EXIT block cannot contain Stmts.");

    // Get return statement..
    const ReturnStmt *RS = nullptr;
    if (!L.getSrc()->empty()) {
      if (Optional<CFGStmt> LastStmt = L.getSrc()->back().getAs<CFGStmt>()) {
        RS = dyn_cast<ReturnStmt>(LastStmt->getStmt());
      }
    }

    // Process the final state transition.
    SubEng.processEndOfFunction(BuilderCtx, Pred, RS);

    // This path is done. Don't enqueue any more nodes.
    return;
  }

  // Call into the SubEngine to process entering the CFGBlock.
  ExplodedNodeSet dstNodes;
  BlockEntrance BE(Blk, Pred->getLocationContext());
  NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
  SubEng.processCFGBlockEntrance(L, nodeBuilder, Pred);

  // Auto-generate a node.
  if (!nodeBuilder.hasGeneratedNodes()) {
    nodeBuilder.generateNode(Pred->State, Pred);
  }

  // Enqueue nodes onto the worklist.
  enqueue(dstNodes);
}

/// Process entering a CFG block: bump the per-stack-frame visit counter and
/// start processing the block's first element (or its exit if it is empty).
void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
                                     ExplodedNode *Pred) {

  // Increment the block counter.
  const LocationContext *LC = Pred->getLocationContext();
  unsigned BlockId = L.getBlock()->getBlockID();
  BlockCounter Counter = WList->getBlockCounter();
  Counter = BCounterFactory.IncrementCount(Counter, LC->getCurrentStackFrame(),
                                           BlockId);
  WList->setBlockCounter(Counter);

  // Process the entrance of the block.
  if (Optional<CFGElement> E = L.getFirstElement()) {
    NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
    SubEng.processCFGElement(*E, Pred, 0, &Ctx);
  }
  else
    HandleBlockExit(L.getBlock(), Pred);
}

/// Process leaving a CFG block: dispatch on the block's terminator to model
/// branching; blocks with no terminator fall through to their sole successor.
void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {

  if (const Stmt *Term = B->getTerminator()) {
    switch (Term->getStmtClass()) {
      default:
        llvm_unreachable("Analysis for this terminator not implemented.");

      case Stmt::CXXBindTemporaryExprClass:
        HandleCleanupTemporaryBranch(
            cast<CXXBindTemporaryExpr>(B->getTerminator().getStmt()), B, Pred);
        return;

      // Model static initializers.
      case Stmt::DeclStmtClass:
        HandleStaticInit(cast<DeclStmt>(Term), B, Pred);
        return;

      case Stmt::BinaryOperatorClass: // '&&' and '||'
        HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
        return;

      case Stmt::BinaryConditionalOperatorClass:
      case Stmt::ConditionalOperatorClass:
        HandleBranch(cast<AbstractConditionalOperator>(Term)->getCond(),
                     Term, B, Pred);
        return;

      // FIXME: Use constant-folding in CFG construction to simplify this
      // case.

      case Stmt::ChooseExprClass:
        HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::CXXTryStmtClass: {
        // Generate a node for each of the successors.
        // Our logic for EH analysis can certainly be improved.
        for (CFGBlock::const_succ_iterator it = B->succ_begin(),
             et = B->succ_end(); it != et; ++it) {
          if (const CFGBlock *succ = *it) {
            generateNode(BlockEdge(B, succ, Pred->getLocationContext()),
                         Pred->State, Pred);
          }
        }
        return;
      }

      case Stmt::DoStmtClass:
        HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::CXXForRangeStmtClass:
        HandleBranch(cast<CXXForRangeStmt>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::ForStmtClass:
        HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::ContinueStmtClass:
      case Stmt::BreakStmtClass:
      case Stmt::GotoStmtClass:
        // Unconditional jumps: fall through to the single-successor edge
        // generation below.
        break;

      case Stmt::IfStmtClass:
        HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
        return;

      case Stmt::IndirectGotoStmtClass: {
        // Only 1 successor: the indirect goto dispatch block.
        assert (B->succ_size() == 1);

        IndirectGotoNodeBuilder
           builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
                   *(B->succ_begin()), this);

        SubEng.processIndirectGoto(builder);
        return;
      }

      case Stmt::ObjCForCollectionStmtClass: {
        // In the case of ObjCForCollectionStmt, it appears twice in a CFG:
        //
        //  (1) inside a basic block, which represents the binding of the
        //      'element' variable to a value.
        //  (2) in a terminator, which represents the branch.
        //
        // For (1), subengines will bind a value (i.e., 0 or 1) indicating
        // whether or not collection contains any more elements.  We cannot
        // just test to see if the element is nil because a container can
        // contain nil elements.
        HandleBranch(Term, Term, B, Pred);
        return;
      }

      case Stmt::SwitchStmtClass: {
        SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
                                  this);

        SubEng.processSwitch(builder);
        return;
      }

      case Stmt::WhileStmtClass:
        HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
        return;
    }
  }

  assert (B->succ_size() == 1 &&
          "Blocks with no terminator should have at most 1 successor.");

  generateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
               Pred->State, Pred);
}

/// Process entering an inlined call.
void CoreEngine::HandleCallEnter(const CallEnter &CE, ExplodedNode *Pred) {
  NodeBuilderContext BuilderCtx(*this, CE.getEntry(), Pred);
  SubEng.processCallEnter(BuilderCtx, CE, Pred);
}

/// Process a two-way branch terminator; successor 0 is the 'true' branch and
/// successor 1 is the 'false' branch.
void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
                              const CFGBlock * B, ExplodedNode *Pred) {
  assert(B->succ_size() == 2);
  NodeBuilderContext Ctx(*this, B, Pred);
  ExplodedNodeSet Dst;
  SubEng.processBranch(Cond, Term, Ctx, Pred, Dst,
                       *(B->succ_begin()), *(B->succ_begin()+1));
  // Enqueue the new frontier onto the worklist.
  enqueue(Dst);
}

/// Process the implicit branch that decides whether a temporary's destructor
/// needs to run.
void CoreEngine::HandleCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
                                              const CFGBlock *B,
                                              ExplodedNode *Pred) {
  assert(B->succ_size() == 2);
  NodeBuilderContext Ctx(*this, B, Pred);
  ExplodedNodeSet Dst;
  SubEng.processCleanupTemporaryBranch(BTE, Ctx, Pred, Dst, *(B->succ_begin()),
                                       *(B->succ_begin() + 1));
  // Enqueue the new frontier onto the worklist.
  enqueue(Dst);
}

/// Process the branch modeling whether a local static has already been
/// initialized.
void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
                                  ExplodedNode *Pred) {
  assert(B->succ_size() == 2);
  NodeBuilderContext Ctx(*this, B, Pred);
  ExplodedNodeSet Dst;
  SubEng.processStaticInitializer(DS, Ctx, Pred, Dst,
                                  *(B->succ_begin()), *(B->succ_begin()+1));
  // Enqueue the new frontier onto the worklist.
  enqueue(Dst);
}


/// Process the CFG element at StmtIdx in block B, or the block's exit when
/// the index is one past the last element.
void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
                                ExplodedNode *Pred) {
  assert(B);
  assert(!B->empty());

  if (StmtIdx == B->size())
    HandleBlockExit(B, Pred);
  else {
    NodeBuilderContext Ctx(*this, B, Pred);
    SubEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
  }
}

/// generateNode - Utility method to generate nodes, hook up successors,
///  and add nodes to the worklist.
void CoreEngine::generateNode(const ProgramPoint &Loc,
                              ProgramStateRef State,
                              ExplodedNode *Pred) {

  bool IsNew;
  ExplodedNode *Node = G.getNode(Loc, State, false, &IsNew);

  if (Pred)
    Node->addPredecessor(Pred, G); // Link 'Node' with its predecessor.
  else {
    assert (IsNew);
    G.addRoot(Node); // 'Node' has no predecessor.  Make it a root.
  }

  // Only add 'Node' to the worklist if it was freshly generated.
  if (IsNew) WList->enqueue(Node);
}

/// Enqueue a statement-level node, advancing the CFG element index past
/// points that do not correspond to a real statement.
void CoreEngine::enqueueStmtNode(ExplodedNode *N,
                                 const CFGBlock *Block, unsigned Idx) {
  assert(Block);
  assert (!N->isSink());

  // Check if this node entered a callee.
  if (N->getLocation().getAs<CallEnter>()) {
    // Still use the index of the CallExpr. It's needed to create the callee
    // StackFrameContext.
    WList->enqueue(N, Block, Idx);
    return;
  }

  // Do not create extra nodes. Move to the next CFG element.
  if (N->getLocation().getAs<PostInitializer>() ||
      N->getLocation().getAs<PostImplicitCall>()||
      N->getLocation().getAs<LoopExit>()) {
    WList->enqueue(N, Block, Idx+1);
    return;
  }

  if (N->getLocation().getAs<EpsilonPoint>()) {
    WList->enqueue(N, Block, Idx);
    return;
  }

  if ((*Block)[Idx].getKind() == CFGElement::NewAllocator) {
    WList->enqueue(N, Block, Idx+1);
    return;
  }

  // At this point, we know we're processing a normal statement.
  CFGStmt CS = (*Block)[Idx].castAs<CFGStmt>();
  PostStmt Loc(CS.getStmt(), N->getLocationContext());

  if (Loc == N->getLocation().withTag(nullptr)) {
    // Note: 'N' should be a fresh node because otherwise it shouldn't be
    // a member of Deferred.
    WList->enqueue(N, Block, Idx+1);
    return;
  }

  bool IsNew;
  ExplodedNode *Succ = G.getNode(Loc, N->getState(), false, &IsNew);
  Succ->addPredecessor(N, G);

  if (IsNew)
    WList->enqueue(Succ, Block, Idx+1);
}

/// Create the CallExitBegin node for returning from an inlined call.
/// Returns nullptr if an identical node already existed in the graph.
ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N,
                                                    const ReturnStmt *RS) {
  // Create a CallExitBegin node and enqueue it.
  const StackFrameContext *LocCtx
                         = cast<StackFrameContext>(N->getLocationContext());

  // Use the callee location context.
  CallExitBegin Loc(LocCtx, RS);

  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, N->getState(), false, &isNew);
  Node->addPredecessor(N, G);
  return isNew ? Node : nullptr;
}


/// Enqueue every node in the set onto the worklist.
void CoreEngine::enqueue(ExplodedNodeSet &Set) {
  for (ExplodedNodeSet::iterator I = Set.begin(),
                                 E = Set.end(); I != E; ++I) {
    WList->enqueue(*I);
  }
}

/// Enqueue every node in the set as a statement node at (Block, Idx).
void CoreEngine::enqueue(ExplodedNodeSet &Set,
                         const CFGBlock *Block, unsigned Idx) {
  for (ExplodedNodeSet::iterator I = Set.begin(),
                                 E = Set.end(); I != E; ++I) {
    enqueueStmtNode(*I, Block, Idx);
  }
}

/// Record each node as an end-of-path, or — for inlined calls — generate and
/// enqueue the corresponding CallExitBegin node instead.
void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set, const ReturnStmt *RS) {
  for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
    ExplodedNode *N = *I;
    // If we are in an inlined call, generate CallExitBegin node.
    if (N->getLocationContext()->getParent()) {
      N = generateCallExitBeginNode(N, RS);
      if (N)
        WList->enqueue(N);
    } else {
      // TODO: We should run remove dead bindings here.
      G.addEndOfPath(N);
      NumPathsExplored++;
    }
  }
}


void NodeBuilder::anchor() { }

/// Create a node in the graph, link it to FromN, and update the frontier.
/// Returns nullptr if an identical node was already present (the path is
/// cached out).
ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
                                            ProgramStateRef State,
                                            ExplodedNode *FromN,
                                            bool MarkAsSink) {
  HasGeneratedNodes = true;
  bool IsNew;
  ExplodedNode *N = C.Eng.G.getNode(Loc, State, MarkAsSink, &IsNew);
  N->addPredecessor(FromN, C.Eng.G);
  // The predecessor is no longer on the frontier once it has a successor.
  Frontier.erase(FromN);

  if (!IsNew)
    return nullptr;

  if (!MarkAsSink)
    Frontier.Add(N);

  return N;
}

void NodeBuilderWithSinks::anchor() { }

StmtNodeBuilder::~StmtNodeBuilder() {
  // Propagate this builder's frontier to the enclosing builder, if any.
  if (EnclosingBldr)
    for (ExplodedNodeSet::iterator I = Frontier.begin(),
                                   E = Frontier.end(); I != E; ++I )
      EnclosingBldr->addNodes(*I);
}

void BranchNodeBuilder::anchor() { }

/// Generate a node for the taken (true) or not-taken (false) branch.
/// Returns nullptr if the branch is infeasible or the node already exists.
ExplodedNode *BranchNodeBuilder::generateNode(ProgramStateRef State,
                                              bool branch,
                                              ExplodedNode *NodePred) {
  // If the branch has been marked infeasible we should not generate a node.
  if (!isFeasible(branch))
    return nullptr;

  ProgramPoint Loc = BlockEdge(C.Block, branch ? DstT:DstF,
                               NodePred->getLocationContext());
  ExplodedNode *Succ = generateNodeImpl(Loc, State, NodePred);
  return Succ;
}

/// Generate (and enqueue, unless it is a sink) a node for one target of an
/// indirect goto. Returns nullptr if the node already exists.
ExplodedNode*
IndirectGotoNodeBuilder::generateNode(const iterator &I,
                                      ProgramStateRef St,
                                      bool IsSink) {
  bool IsNew;
  ExplodedNode *Succ =
      Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
                    St, IsSink, &IsNew);
  Succ->addPredecessor(Pred, Eng.G);

  if (!IsNew)
    return nullptr;

  if (!IsSink)
    Eng.WList->enqueue(Succ);

  return Succ;
}


/// Generate and enqueue a node for one 'case' label of a switch.
/// Returns nullptr if the node already exists.
ExplodedNode*
SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
                                        ProgramStateRef St) {

  bool IsNew;
  ExplodedNode *Succ =
      Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
                    St, false, &IsNew);
  Succ->addPredecessor(Pred, Eng.G);
  if (!IsNew)
    return nullptr;

  Eng.WList->enqueue(Succ);
  return Succ;
}


/// Generate (and enqueue, unless it is a sink) a node for the 'default' label
/// of a switch. Returns nullptr if the default block is unreachable or the
/// node already exists.
ExplodedNode*
SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
                                           bool IsSink) {
  // Get the block for the default case.
  assert(Src->succ_rbegin() != Src->succ_rend());
  CFGBlock *DefaultBlock = *Src->succ_rbegin();

  // Sanity check for default blocks that are unreachable and not caught
  // by earlier stages.
  if (!DefaultBlock)
    return nullptr;

  bool IsNew;
  ExplodedNode *Succ =
      Eng.G.getNode(BlockEdge(Src, DefaultBlock, Pred->getLocationContext()),
                    St, IsSink, &IsNew);
  Succ->addPredecessor(Pred, Eng.G);

  if (!IsNew)
    return nullptr;

  if (!IsSink)
    Eng.WList->enqueue(Succ);

  return Succ;
}