//===- ThreadSafety.cpp ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

namespace {

class TILPrinter :
    public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};

} // namespace

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Kind, Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push CapE onto the list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    iterator It = std::find_if(begin(), end(),
                               [=](const CapabilityExpr &CapE2) {
      return CapE.equals(CapE2);
    });
    if (It == end())
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

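// Illustrative sketch (not part of the analysis itself; assumes the
// annotation macros described in the documentation linked above). The
// "facts" tracked below correspond to capabilities that user code acquires
// and releases:
//
//   class Account {
//     Mutex Mu;
//     int Balance GUARDED_BY(Mu);
//
//     void deposit(int Amount) {
//       Mu.Lock();          // the fact "Mu held exclusively" is added
//       Balance += Amount;  // OK: the lockset contains Mu
//       Mu.Unlock();        // the fact is removed again
//     }
//   };
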
/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
///
/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
private:
  /// Exclusive or shared.
  LockKind LKind;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

  /// True if the lock was asserted.
  bool Asserted;

  /// True if the lock was declared.
  bool Declared;

public:
  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
            bool Asrt, bool Declrd = false)
      : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt),
        Declared(Declrd) {}
  virtual ~FactEntry() = default;

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  bool asserted() const { return Asserted; }
  bool declared() const { return Declared; }

  void setDeclared(bool D) { Declared = D; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry, ThreadSafetyHandler &Handler,
                          StringRef DiagKind) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove, ThreadSafetyHandler &Handler,
                            StringRef DiagKind) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};

using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  std::vector<std::unique_ptr<FactEntry>> Facts;

public:
  FactID newFact(std::unique_ptr<FactEntry> Entry) {
    Facts.push_back(std::move(Entry));
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
  FactEntry &operator[](FactID F) { return *Facts[F]; }
};

/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup. Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
    FactID F = FM.newFact(std::move(Entry));
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager &FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return std::find_if(begin(), end(), [&](FactID ID) {
      return FM[ID].matches(CapE);
    });
  }

  FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) {
      return FM[ID].matches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  FactEntry *findLockUniv(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].matchesUniv(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

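  // Usage sketch (illustrative): callers below first look for an exact or
  // universal match, and only fall back to a partial match in order to
  // produce a weaker "possibly held" diagnostic, e.g.:
  //
  //   FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
  //   if (!LDat)
  //     LDat = FSet.findPartialMatch(FactMan, Cp);
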
  FactEntry *findPartialMatch(FactManager &FM,
                              const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl *Vd) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].valueDecl() == Vd;
    });
    return I != end();
  }
};

class ThreadSafetyAnalyzer;

} // namespace

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
                              ThreadSafetyAnalyzer& Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl* Vd,
                        const FactSet& FSet,
                        ThreadSafetyAnalyzer& Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};

// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace DeclRefs to local
// variables with their SSA names. Instead, we compute a Context for each
// point in the code, which maps local variables to the appropriate SSA name.
// This map changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Reference to another VarDefinition
    unsigned Ref = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
        : Dec(D), Ref(R), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<unsigned> CtxIndices;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

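  // Usage sketch (illustrative): a client walking the statements of a block
  // replays the saved contexts roughly like this (hypothetical loop):
  //
  //   unsigned CtxIndex = BlockInfo.EntryIndex;
  //   Context Ctx = BlockInfo.EntryContext;
  //   for (const Stmt *S : StatementsInBlock)
  //     Ctx = VMap.getNextContext(CtxIndex, S, Ctx);
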
  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Removes a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}

// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)                   // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != P.second)  // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C. Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

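// For example (illustrative): intersecting { x -> x1, y -> y1 } with
// { x -> x2, y -> y1 } keeps y, whose definition agrees on both paths, and
// clears x to the placeholder definition 0, since x1 and x2 differ:
//
//   Context C = intersectContexts(C1, C2);  // C maps y -> y1 and x -> 0
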
// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions. C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    unsigned i1 = P.second;
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(P.first);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0;    // Mark this variable as undefined
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
//                       { Context                 | VarDefinitions }
// int x = 0;            { x -> x1                 | x1 = 0 }
// int y = 0;            { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
// if (b) x = 1;         { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
// else   x = 2;         { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
// ...                   { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm. Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block. We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass. Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
//                       { Context           | VarDefinitions }
// int x = 0, y = 0;     { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
// while (b)             { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//   x = x+1;            { x -> x3, y -> y1  | x3 = x2 + 1, ... }
// ...                   { y -> y1           | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (const auto *CurrBlock : *SortedGraph) {
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge, skip it
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminator()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    }
  }
}

namespace {

class LockableFactEntry : public FactEntry {
private:
  /// managed by ScopedLockable object
  bool Managed;

public:
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    bool Mng = false, bool Asrt = false)
      : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!Managed && !asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler,
                  StringRef DiagKind) const override {
    Handler.handleDoubleLock(DiagKind, entry.toString(), entry.loc());
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove, ThreadSafetyHandler &Handler,
                    StringRef DiagKind) const override {
    FSet.removeLock(FactMan, Cp);
    if (!Cp.negative()) {
      FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }
};

class ScopedLockableFactEntry : public FactEntry {
private:
  SmallVector<const til::SExpr *, 4> UnderlyingMutexes;

public:
  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          const CapExprSet &Excl, const CapExprSet &Shrd)
      : FactEntry(CE, LK_Exclusive, Loc, false) {
    for (const auto &M : Excl)
      UnderlyingMutexes.push_back(M.sexpr());
    for (const auto &M : Shrd)
      UnderlyingMutexes.push_back(M.sexpr());
  }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
      if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(
            "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler,
                  StringRef DiagKind) const override {
    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
      CapabilityExpr UnderCp(UnderlyingMutex, false);

      // We're relocking the underlying mutexes. Warn on double locking.
      if (FSet.findLock(FactMan, UnderCp)) {
        Handler.handleDoubleLock(DiagKind, UnderCp.toString(), entry.loc());
      } else {
        FSet.removeLock(FactMan, !UnderCp);
        FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                  UnderCp, entry.kind(), entry.loc()));
      }
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove, ThreadSafetyHandler &Handler,
                    StringRef DiagKind) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
      CapabilityExpr UnderCp(UnderlyingMutex, false);
      auto UnderEntry = llvm::make_unique<LockableFactEntry>(
          !UnderCp, LK_Exclusive, UnlockLoc);

      if (FullyRemove) {
        // We're destroying the managing object.
        // Remove the underlying mutex if it exists; but don't warn.
        if (FSet.findLock(FactMan, UnderCp)) {
          FSet.removeLock(FactMan, UnderCp);
          FSet.addLock(FactMan, std::move(UnderEntry));
        }
      } else {
        // We're releasing the underlying mutex, but not destroying the
        // managing object. Warn on dual release.
        if (!FSet.findLock(FactMan, UnderCp)) {
          Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
                                        UnlockLoc);
        }
        FSet.removeLock(FactMan, UnderCp);
        FSet.addLock(FactMan, std::move(UnderEntry));
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }
};

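// Illustrative sketch (assumes the usual scoped-guard annotation macros): a
// ScopedLockableFactEntry models RAII guards such as
//
//   class SCOPED_LOCKABLE MutexLock {
//   public:
//     MutexLock(Mutex *Mu) EXCLUSIVE_LOCK_FUNCTION(Mu);
//     ~MutexLock() UNLOCK_FUNCTION();
//   };
//
// Constructing the guard adds facts for both the guard object and its
// underlying mutex; destroying it removes them (the FullyRemove case above).
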
/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const CXXMethodDecl *CurrentMethod = nullptr;
  LocalVariableMap LocalVarMap;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
               StringRef DiagKind, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
                  StringRef DiagKind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, VarDecl *SelfDecl = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc,
                        LockErrorKind LEK1, LockErrorKind LEK2,
                        bool Modify=true);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc, LockErrorKind LEK1,
                        bool Modify=true) {
    intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
  }

  void runAnalysis(AnalysisDeclContext &AC);
};

} // namespace

/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of StartVd.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (CycMap.find(Vd) == CycMap.end()) {
          CycMap.insert(std::make_pair(Vd, true));
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}

/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

namespace {

template <typename Ty>
class has_arg_iterator_range {
  using yes = char[1];
  using no = char[2];

  template <typename Inner>
  static yes& test(Inner *I, decltype(I->args()) * = nullptr);

  template <typename>
  static no& test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};

} // namespace

static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
  return A->getName();
}

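// Illustrative note: the string returned by these overloads names the
// capability kind used in diagnostics. For example (hypothetical type), a
// class declared as
//
//   class CAPABILITY("role") NetworkRole { ... };
//
// yields warnings that read "role" rather than the default "mutex".
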
static StringRef ClassifyDiagnostic(QualType VDT) {
  // We need to look at the declaration of the type of the value to determine
  // which it is. The type should either be a record or a typedef, or a pointer
  // or reference thereof.
  if (const auto *RT = VDT->getAs<RecordType>()) {
    if (const auto *RD = RT->getDecl())
      if (const auto *CA = RD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (const auto *TT = VDT->getAs<TypedefType>()) {
    if (const auto *TD = TT->getDecl())
      if (const auto *CA = TD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (VDT->isPointerType() || VDT->isReferenceType())
    return ClassifyDiagnostic(VDT->getPointeeType());

  return "mutex";
}

static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
  assert(VD && "No ValueDecl passed");

  // The ValueDecl is the declaration of a mutex or role (hopefully).
  return ClassifyDiagnostic(VD->getType());
}

template <typename AttrTy>
static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
                               StringRef>::type
ClassifyDiagnostic(const AttrTy *A) {
  if (const ValueDecl *VD = getValueDecl(A->getArg()))
    return ClassifyDiagnostic(VD);
  return "mutex";
}

template <typename AttrTy>
static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
                               StringRef>::type
ClassifyDiagnostic(const AttrTy *A) {
  for (const auto *Arg : A->args()) {
    if (const ValueDecl *VD = getValueDecl(Arg))
      return ClassifyDiagnostic(VD);
  }
  return "mutex";
}

bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  if (!CurrentMethod)
    return false;
  if (const auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
    const auto *VD = P->clangDecl();
    if (VD)
      return VD->getDeclContext() == CurrentMethod->getDeclContext();
  }
  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
                                   std::unique_ptr<FactEntry> Entry,
                                   StringRef DiagKind, bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      if (inCurrentScope(*Entry) && !Entry->asserted())
        Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (Handler.issueBetaWarnings() &&
      !Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), DiagKind);
  }

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind);
  } else {
    FSet.addLock(FactMan, std::move(Entry));
  }
}

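// Illustrative sketch of the negative-capability logic above (assumes the
// REQUIRES macro from the documentation): acquiring Mu is only provably safe
// if the caller cannot already hold it, which the negative fact !Mu
// expresses:
//
//   void init() REQUIRES(!Mu) {  // caller must not hold Mu
//     Mu.Lock();                 // OK: !Mu is in the fact set
//     ...
//   }
//
// Without the REQUIRES(!Mu) annotation, handleNegativeNotHeld fires for
// capabilities in the current scope.
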
/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind,
                                      StringRef DiagKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(),
                                      LDat->kind(), ReceivedKind, UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
                     DiagKind);
}

/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       VarDecl *SelfDecl) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
      return;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
      continue;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false by the caller; it is set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond))
    return CallExp;
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *EWC = dyn_cast<ExprWithCleanups>(Cond))
    return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  }
  return nullptr;
}

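// Illustrative examples of the patterns handled above (hypothetical mutex
// API): the analysis traces a branch condition back to the trylock call,
// flipping Negate once per logical negation:
//
//   if (mu.TryLock()) { ... }     // lock held on the true branch
//   if (!mu.TryLock()) return;    // UO_LNot: lock held on the fall-through
//   bool ok = mu.TryLock();
//   if (ok == false) return;      // DeclRefExpr lookup plus BO_EQ handling
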
/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet &Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  if (!Cond)
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
  StringRef CapDiagKind = "mutex";

  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttrs())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->attrs()) {
    switch (Attr->getKind()) {
    case attr::TryAcquireCapability: {
      auto *A = cast<TryAcquireCapabilityAttr>(Attr);
      getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                  Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
                  Negate);
      CapDiagKind = ClassifyDiagnostic(A);
      break;
    }
    case attr::ExclusiveTrylockFunction: {
      const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
      getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
                  PredBlock, CurrBlock, A->getSuccessValue(), Negate);
      CapDiagKind = ClassifyDiagnostic(A);
      break;
    }
    case attr::SharedTrylockFunction: {
      const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
      getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
                  PredBlock, CurrBlock, A->getSuccessValue(), Negate);
      CapDiagKind = ClassifyDiagnostic(A);
      break;
    }
    default:
      break;
    }
  }

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
                                                         LK_Exclusive, Loc),
            CapDiagKind);
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
                                                         LK_Shared, Loc),
            CapDiagKind);
}

namespace {

/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to avoid inheriting from a visitor.
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  FactSet FSet;
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // helper functions
  void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK,
                          StringRef DiagKind, SourceLocation Loc);
  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
                       StringRef DiagKind);

  void checkAccess(const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK = POK_VarAccess);
  void checkPtAccess(const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK = POK_VarAccess);

  void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
        LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}

  void VisitUnaryOperator(const UnaryOperator *UO);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCastExpr(const CastExpr *CE);
  void VisitCallExpr(const CallExpr *Exp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
  void VisitDeclStmt(const DeclStmt *S);
};

} // namespace

/// Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed-in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK,
                                      StringRef DiagKind, SourceLocation Loc) {
  LockKind LK = getLockKindFromAccessKind(AK);

  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
  if (Cp.isInvalid()) {
    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  if (Cp.negative()) {
    // Negative capabilities act like locks excluded
    FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
    if (LDat) {
      Analyzer->Handler.handleFunExcludesLock(
          DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
      return;
    }

    // If this does not refer to a negative capability in the same class,
    // then stop here.
    if (!Analyzer->inCurrentScope(Cp))
      return;

    // Otherwise the negative requirement must be propagated to the caller.
    LDat = FSet.findLock(Analyzer->FactMan, Cp);
    if (!LDat) {
      Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(),
                                           LK_Shared, Loc);
    }
    return;
  }

  FactEntry* LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
  bool NoError = true;
  if (!LDat) {
    // No exact match found. Look for a partial match.
    LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
    if (LDat) {
      // Warn that there's no precise match.
      std::string PartMatchStr = LDat->toString();
      StringRef PartMatchName(PartMatchStr);
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                           LK, Loc, &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                           LK, Loc);
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK)) {
    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                         LK, Loc);
  }
}

/// Warn if the LSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
                                   Expr *MutexExp, StringRef DiagKind) {
  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
  if (Cp.isInvalid()) {
    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  FactEntry* LDat = FSet.findLock(Analyzer->FactMan, Cp);
  if (LDat) {
    Analyzer->Handler.handleFunExcludesLock(
        DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
  }
}

/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
                               ProtectedOperationKind POK) {
  Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();

  SourceLocation Loc = Exp->getExprLoc();

  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
    const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        // Guard against self-initialization. e.g., int &i = i;
        if (E == Exp)
          break;
        Exp = E;
        continue;
      }
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences
    if (UO->getOpcode() == UO_Deref)
      checkPtAccess(UO->getSubExpr(), AK, POK);
    return;
  }

  if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    checkPtAccess(AE->getLHS(), AK, POK);
    return;
  }

  if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
    if (ME->isArrow())
      checkPtAccess(ME->getBase(), AK, POK);
    else
      checkAccess(ME->getBase(), AK, POK);
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
  }

  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
                       ClassifyDiagnostic(I), Loc);
}

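// Illustrative example of the distinction checked by the two routines
// (assumes the usual annotation macros): GUARDED_BY protects the pointer
// variable itself, while PT_GUARDED_BY protects the data it points to:
//
//   int *p GUARDED_BY(Mu1) PT_GUARDED_BY(Mu2);
//   p = &x;    // requires Mu1 (checkAccess)
//   *p = 42;   // requires Mu2 (checkPtAccess, POK_VarDereference)
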
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
                                 ProtectedOperationKind POK) {
  while (true) {
    if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then its elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY.
        checkAccess(CE->getSubExpr(), AK, POK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  // Pass-by-reference warnings are under a different flag.
  ProtectedOperationKind PtPOK = POK_VarDereference;
  if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
    Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
                                        Exp->getExprLoc());

  for (const auto *I : D->specific_attrs<PtGuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
                       ClassifyDiagnostic(I), Exp->getExprLoc());
}

/// Process a function call, method call, constructor call,
/// or destructor call. This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
///
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              VarDecl *VD) {
  SourceLocation Loc = Exp->getExprLoc();
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
  StringRef CapDiagKind = "mutex";

  // Figure out if we're constructing an object of a scoped lockable class.
  bool isScopedVar = false;
  if (VD) {
    if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) {
      const CXXRecordDecl *PD = CD->getParent();
      if (PD && PD->hasAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  for (const Attr *At : D->attrs()) {
    switch (At->getKind()) {
      // When we encounter a lock function, we need to add the lock to our
      // lockset.
      case attr::AcquireCapability: {
        const auto *A = cast<AcquireCapabilityAttr>(At);
        Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                            : ExclusiveLocksToAdd,
                              A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      // An assert will add a lock to the lockset, but will not generate
      // a warning if it is already there, and will not generate a warning
      // if it is not removed.
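      // For instance (hypothetical user code, assuming an
      // ASSERT_EXCLUSIVE_LOCK macro that wraps this attribute):
      //   void AssertHeld() ASSERT_EXCLUSIVE_LOCK(mu);
      //   int get() { AssertHeld(); return data; }  // no warning on 'data',
      //                                             // and no unlock required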
      case attr::AssertExclusiveLock: {
        const auto *A = cast<AssertExclusiveLockAttr>(At);

        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock, LK_Exclusive, Loc, false, true),
                            ClassifyDiagnostic(A));
        break;
      }
      case attr::AssertSharedLock: {
        const auto *A = cast<AssertSharedLockAttr>(At);

        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock, LK_Shared, Loc, false, true),
                            ClassifyDiagnostic(A));
        break;
      }

      case attr::AssertCapability: {
        const auto *A = cast<AssertCapabilityAttr>(At);
        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock,
                                A->isShared() ? LK_Shared : LK_Exclusive, Loc,
                                false, true),
                            ClassifyDiagnostic(A));
        break;
      }

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::ReleaseCapability: {
        const auto *A = cast<ReleaseCapabilityAttr>(At);
        if (A->isGeneric())
          Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
        else if (A->isShared())
          Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
        else
          Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      case attr::RequiresCapability: {
        const auto *A = cast<RequiresCapabilityAttr>(At);
        for (auto *Arg : A->args()) {
          warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
                             POK_FunctionCall, ClassifyDiagnostic(A),
                             Exp->getExprLoc());
          // Used when a scoped lockable object adopts an already-held lock.
          if (isScopedVar) {
            Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs
                                                : ScopedExclusiveReqs,
                                  A, Exp, D, VD);
          }
        }
        break;
      }

      case attr::LocksExcluded: {
        const auto *A = cast<LocksExcludedAttr>(At);
        for (auto *Arg : A->args())
          warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
        break;
      }

      // Ignore attributes unrelated to thread-safety
      default:
        break;
    }
  }

  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);

  // Add locks.
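  // Removal-before-addition supports upgrade/downgrade, e.g. a hypothetical
  // method annotated with both a release and an acquire attribute:
  //   void UpgradeToExclusive() RELEASE_SHARED(mu) ACQUIRE(mu);
  // The shared fact was just removed; the exclusive fact is added below.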
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                M, LK_Exclusive, Loc, isScopedVar),
                      CapDiagKind);
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                M, LK_Shared, Loc, isScopedVar),
                      CapDiagKind);

  if (isScopedVar) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
    // FIXME: does this store a pointer to DRE?
    CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);

    std::copy(ScopedExclusiveReqs.begin(), ScopedExclusiveReqs.end(),
              std::back_inserter(ExclusiveLocksToAdd));
    std::copy(ScopedSharedReqs.begin(), ScopedSharedReqs.end(),
              std::back_inserter(SharedLocksToAdd));
    Analyzer->addLock(FSet,
                      llvm::make_unique<ScopedLockableFactEntry>(
                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
                      CapDiagKind);
  }
}

/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case UO_PostDec:
    case UO_PostInc:
    case UO_PreDec:
    case UO_PreInc:
      checkAccess(UO->getSubExpr(), AK_Written);
      break;
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);

  checkAccess(BO->getLHS(), AK_Written);
}

/// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
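/// For example (hypothetical user code):
/// \code
///   Mutex mu;
///   int x GUARDED_BY(mu);
///   int f() { return x; }  // the read of 'x' is an LValueToRValue cast,
///                          // checked here with AK_Read
/// \endcode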
void BuildLockset::VisitCastExpr(const CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  checkAccess(CE->getSubExpr(), AK_Read);
}

void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
  bool ExamineArgs = true;
  bool OperatorFun = false;

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer
    const CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        if (MD->isConst())
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        else // FIXME -- should be AK_Written
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
      } else {
        if (MD->isConst())
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
        else // FIXME -- should be AK_Written
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }
  } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    OperatorFun = true;

    auto OEop = OE->getOperator();
    switch (OEop) {
      case OO_Equal: {
        ExamineArgs = false;
        const Expr *Target = OE->getArg(0);
        const Expr *Source = OE->getArg(1);
        checkAccess(Target, AK_Written);
        checkAccess(Source, AK_Read);
        break;
      }
      case OO_Star:
      case OO_Arrow:
      case OO_Subscript: {
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
          // Grrr. operator* can be multiplication...
          checkPtAccess(Obj, AK_Read);
        }
        break;
      }
      default: {
        // TODO: get rid of this, and rely on pass-by-ref instead.
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        break;
      }
    }
  }

  if (ExamineArgs) {
    if (const FunctionDecl *FD = Exp->getDirectCallee()) {
      // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
      // only turns off checking within the body of a function, but we also
      // use it to turn off checking in arguments to the function. This
      // could result in some false negatives, but the alternative is to
      // create yet another attribute.
      if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
        unsigned Fn = FD->getNumParams();
        unsigned Cn = Exp->getNumArgs();
        unsigned Skip = 0;

        unsigned i = 0;
        if (OperatorFun) {
          if (isa<CXXMethodDecl>(FD)) {
            // First arg in operator call is implicit self argument,
            // and doesn't appear in the FunctionDecl.
            Skip = 1;
            Cn--;
          } else {
            // Ignore the first argument of operators; it's been checked above.
            i = 1;
          }
        }
        // Ignore default arguments
        unsigned n = (Fn < Cn) ? Fn : Cn;
        for (; i < n; ++i) {
          const ParmVarDecl *Pvd = FD->getParamDecl(i);
          const Expr *Arg = Exp->getArg(i + Skip);
          QualType Qt = Pvd->getType();
          if (Qt->isReferenceType())
            checkAccess(Arg, AK_Read, POK_PassByRef);
        }
      }
    }
  }

  auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!D || !D->hasAttrs())
    return;
  handleCall(Exp, D);
}

void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
  const CXXConstructorDecl *D = Exp->getConstructor();
  if (D && D->isCopyConstructor()) {
    const Expr *Source = Exp->getArg(0);
    checkAccess(Source, AK_Read);
  }
  // FIXME -- only handles constructors in DeclStmt below.
}

static CXXConstructorDecl *
findConstructorForByValueReturn(const CXXRecordDecl *RD) {
  // Prefer a move constructor over a copy constructor. If there's more than
  // one copy constructor or more than one move constructor, we arbitrarily
  // pick the first declared such constructor rather than trying to guess which
  // one is more appropriate.
  CXXConstructorDecl *CopyCtor = nullptr;
  for (auto *Ctor : RD->ctors()) {
    if (Ctor->isDeleted())
      continue;
    if (Ctor->isMoveConstructor())
      return Ctor;
    if (!CopyCtor && Ctor->isCopyConstructor())
      CopyCtor = Ctor;
  }
  return CopyCtor;
}

static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
                               SourceLocation Loc) {
  ASTContext &Ctx = CD->getASTContext();
  return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc,
                                  CD, true, Args, false, false, false, false,
                                  CXXConstructExpr::CK_Complete,
                                  SourceRange(Loc, Loc));
}

void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  for (auto *D : S->getDeclGroup()) {
    if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      Expr *E = VD->getInit();
      if (!E)
        continue;
      E = E->IgnoreParens();

      // handle constructors that involve temporaries
      if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
        E = EWC->getSubExpr();
      if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
        E = BTE->getSubExpr();

      if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
        const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(E, CtorD, VD);
      } else if (isa<CallExpr>(E) && E->isRValue()) {
        // If the object is initialized by a function call that returns a
        // scoped lockable by value, use the attributes on the copy or move
        // constructor to figure out what effect that should have on the
        // lockset.
        // FIXME: Is this really the best way to handle this situation?
        auto *RD = E->getType()->getAsCXXRecordDecl();
        if (!RD || !RD->hasAttr<ScopedLockableAttr>())
          continue;
        CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(buildFakeCtorCall(CtorD, {E}, E->getBeginLoc()), CtorD, VD);
      }
    }
  }
}

/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged.
/// For example, given the sequence: A; if () then B; else C; D; we need to
/// check that the locksets after B and C are the same. In the event of a
/// difference, we use the intersection of these two locksets at the start
/// of D.
///
/// \param FSet1 The first lockset.
/// \param FSet2 The second lockset.
/// \param JoinLoc The location of the join point for error reporting.
/// \param LEK1 The error message to report if a mutex is missing from FSet1.
/// \param LEK2 The error message to report if a mutex is missing from FSet2.
/// \param Modify If true, update FSet1 in place to the intersection.
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
                                            const FactSet &FSet2,
                                            SourceLocation JoinLoc,
                                            LockErrorKind LEK1,
                                            LockErrorKind LEK2,
                                            bool Modify) {
  FactSet FSet1Orig = FSet1;

  // Find locks in FSet2 that conflict or are not in FSet1, and warn.
  for (const auto &Fact : FSet2) {
    const FactEntry *LDat1 = nullptr;
    const FactEntry *LDat2 = &FactMan[Fact];
    FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
    if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];

    if (LDat1) {
      if (LDat1->kind() != LDat2->kind()) {
        Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
                                         LDat2->loc(), LDat1->loc());
        if (Modify && LDat1->kind() != LK_Exclusive) {
          // Take the exclusive lock, which is the one in FSet2.
          *Iter1 = Fact;
        }
      }
      else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
        // The non-asserted lock in FSet2 is the one we want to track.
        *Iter1 = Fact;
      }
    } else {
      LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
                                           Handler);
    }
  }

  // Find locks in FSet1 that are not in FSet2, and remove them.
  for (const auto &Fact : FSet1Orig) {
    const FactEntry *LDat1 = &FactMan[Fact];
    const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);

    if (!LDat2) {
      LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
                                           Handler);
      if (Modify)
        FSet1.removeLock(FactMan, *LDat1);
    }
  }
}

// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
  if (B->hasNoReturnElement())
    return true;
  if (B->empty())
    return false;

  CFGElement Last = B->back();
  if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
    if (isa<CXXThrowExpr>(S->getStmt()))
      return true;
  }
  return false;
}

/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs to be rewritten as a visitor for
  // CFGWalker. For now, we just use the walker to set things up.
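  // In outline, the traversal below is a single forward pass (a sketch, not
  // exact code):
  //   for each block B in topological order:
  //     EntrySet(B) = intersection of ExitSet(P) over visited predecessors P
  //     ExitSet(B)  = effect of B's statements on EntrySet(B)  // BuildLockset
  // warning on any differences found while intersecting.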
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
  CurrentMethod = dyn_cast<CXXMethodDecl>(D);

  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code. Constructors and destructors must assume unique access
  // to 'this', so checks on member variable accesses are disabled, but we
  // should still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.

  Handler.enterFunction(CurrentFunction);

  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  // Mark entry block as reachable
  BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;
    StringRef CapDiagKind = "mutex";

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock
        // implementation. We must ignore such methods.
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
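        // Checking a function annotated as a trylock would require modeling
        // its boolean result, e.g. (hypothetical user code):
        //   bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true, mu);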
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc);
      Entry->setDeclared(true);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc);
      Entry->setDeclared(true);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
  }

  for (const auto *CurrBlock : *SortedGraph) {
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    SmallVector<CFGBlock *, 8> SpecialBlocks;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // Skip back edges: if *PI has not been visited yet, then
      // *PI -> CurrBlock is a back edge.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      // If the previous block ended in a 'continue' or 'break' statement, then
      // a difference in locksets is probably due to a bug in that block, rather
      // than in some other predecessor. In that case, keep the other
      // predecessor's lockset.
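      // For example (hypothetical):
      //   while (cond) { mu.Lock(); if (flag) break; mu.Unlock(); }
      // The 'break' block reaches the loop exit still holding 'mu'; blaming
      // that block gives a more useful diagnostic than blaming the exit.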
      if (const Stmt *Terminator = (*PI)->getTerminator()) {
        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
          SpecialBlocks.push_back(*PI);
          continue;
        }
      }

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         CurrBlockInfo->EntryLoc,
                         LEK_LockedSomePredecessors);
      }
    }

    // Skip rest of block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    // Process continue and break blocks. Assume that the lockset for the
    // resulting block is unaffected by any discrepancies in them.
    for (const auto *PrevBlock : SpecialBlocks) {
      int PrevBlockID = PrevBlock->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Determine whether this edge is a loop terminator for diagnostic
        // purposes. FIXME: A 'break' statement might be a loop terminator, but
        // it might also be part of a switch. Also, a subsequent destructor
        // might add to the lockset, in which case the real issue might be a
        // double lock on the other path.
        const Stmt *Terminator = PrevBlock->getTerminator();
        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);

        FactSet PrevLockset;
        getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
                       PrevBlock, CurrBlock);

        // Do not update EntrySet.
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         PrevBlockInfo->ExitLoc,
                         IsLoop ? LEK_LockedSomeLoopIterations
                                : LEK_LockedSomePredecessors,
                         false);
      }
    }

    BuildLockset LocksetBuilder(this, *CurrBlockInfo);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI.castAs<CFGStmt>();
          LocksetBuilder.Visit(CS.getStmt());
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
          const auto *DD = AD.getDestructorDecl(AC.getASTContext());
          if (!DD->hasAttrs())
            break;

          // Create a dummy expression.
          auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
          DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
                          VK_LValue, AD.getTriggerStmt()->getEndLoc());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
    // the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
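    // For example (hypothetical):
    //   while (cond) { mu.Lock(); /* ...no unlock... */ }
    // The lockset at the back edge contains 'mu' while the loop-entry lockset
    // does not, so we warn that 'mu' is held on some loop iterations only.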
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
                       PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations,
                       false);
    }
  }

  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];

  // Skip the final check if the exit block is unreachable.
  if (!Final->Reachable)
    return;

  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedExitSet = Initial->EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                         Lock, LK_Exclusive, D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                         Lock, LK_Shared, D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedExitSet.removeLock(FactMan, Lock);

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedExitSet, Final->ExitSet,
                   Final->ExitLoc,
                   LEK_LockedAtEndOfFunction,
                   LEK_NotLockedAtEndOfFunction,
                   false);

  Handler.leaveFunction(CurrentFunction);
}

/// Public entry point for the thread-safety analysis: creates an analyzer
/// (reusing the cached BeforeSet, if any) and runs it over the CFG of the
/// given declaration.
void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                                           ThreadSafetyHandler &Handler,
                                           BeforeSet **BSet) {
  if (!*BSet)
    *BSet = new BeforeSet;
  ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
  Analyzer.runAnalysis(AC);
}

void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }

/// Helper function that returns a LockKind required for the given level
/// of access.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read:
      return LK_Shared;
    case AK_Written:
      return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}