//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//
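//
// For orientation, a minimal sketch of the kind of annotated code this
// analysis checks, using the macro spellings from the documentation linked
// above (Mutex, Mu, Data, Increment are illustrative names, not part of
// this file):
//
//   class CAPABILITY("mutex") Mutex {
//   public:
//     void Lock()   ACQUIRE();
//     void Unlock() RELEASE();
//   };
//
//   Mutex Mu;
//   int Data GUARDED_BY(Mu);
//
//   void Increment() {
//     Data++;   // warning: writing variable 'Data' requires holding
//               // mutex 'Mu' exclusively
//   }
//
//===----------------------------------------------------------------------===//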

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Kind, Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push M onto list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    iterator It = std::find_if(begin(), end(),
                               [=](const CapabilityExpr &CapE2) {
      return CapE.equals(CapE2);
    });
    if (It == end())
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution.  Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
///
/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
private:
  /// Exclusive or shared.
  LockKind LKind;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

  /// True if the lock was asserted.
  bool Asserted;

  /// True if the lock was declared.
  bool Declared;

public:
  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
            bool Asrt, bool Declrd = false)
      : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt),
        Declared(Declrd) {}
  virtual ~FactEntry() = default;

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  bool asserted() const { return Asserted; }
  bool declared() const { return Declared; }

  void setDeclared(bool D) { Declared = D; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry, ThreadSafetyHandler &Handler,
                          StringRef DiagKind) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove, ThreadSafetyHandler &Handler,
                            StringRef DiagKind) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};

using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  std::vector<std::unique_ptr<const FactEntry>> Facts;

public:
  FactID newFact(std::unique_ptr<FactEntry> Entry) {
    Facts.push_back(std::move(Entry));
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};
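
// A worked trace of how facts evolve (editor's illustration; see addLock and
// LockableFactEntry::handleUnlock below for the actual logic):
//
//   mu.Lock();    // FactSet: { mu }    -- positive fact: mu is held
//   ...
//   mu.Unlock();  // FactSet: { !mu }   -- negative fact: mu is known not held
//
// Negative facts let the analysis distinguish "known not held" from
// "unknown", which is what the handleNegativeNotHeld diagnostics rely on.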

/// A FactSet is the set of facts that are known to be true at a
/// particular program point.  FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager.  A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup.  Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
    FactID F = FM.newFact(std::move(Entry));
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return std::find_if(begin(), end(), [&](FactID ID) {
      return FM[ID].matches(CapE);
    });
  }

  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) {
      return FM[ID].matches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].matchesUniv(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].valueDecl() == Vd;
    });
    return I != end();
  }
};

class ThreadSafetyAnalyzer;

} // namespace
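
// BeforeSet (below) checks lock-ordering annotations.  A sketch of the source
// pattern it handles (editor's illustration; macro spellings from the
// documentation, identifiers hypothetical):
//
//   Mutex Mu1 ACQUIRED_BEFORE(Mu2);
//   Mutex Mu2;
//
//   void f() {
//     Mu2.Lock();
//     Mu1.Lock();   // warning: 'Mu1' must be acquired before 'Mu2'
//     ...
//   }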

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
                              ThreadSafetyAnalyzer& Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl* Vd,
                        const FactSet& FSet,
                        ThreadSafetyAnalyzer& Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG.  See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
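
// How the EntrySet/ExitSet pairs are used at a join point (editor's
// illustration; the actual merge is intersectAndWarn, declared later in
// this file):
//
//   if (b) mu.Lock();      // ExitSet of then-branch: { mu }
//   else   /* nothing */   // ExitSet of else-branch: { }
//   ...                    // EntrySet of the join block: { }, plus a
//                          // warning that 'mu' is not held on every path.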

// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions.  It provides SSA-like functionality when traversing the
// CFG.  Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable
// DeclRefs with their SSA-names.  Instead, we compute a Context for each
// point in the code, which maps local variables to the appropriate SSA-name.
// This map changes with each assignment.
//
// The map is computed in a single pass over the CFG.  Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted.  A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Reference to another VarDefinition
    unsigned Ref = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
        : Dec(D), Ref(R), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<unsigned> CtxIndices;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context.  Returns
  /// NULL if the expression is not statically known.  If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S.  This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG.  It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }
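
  // Typical client pattern (editor's sketch, under the replay rules above):
  // start from the block's saved entry index and replay contexts while
  // walking the statements, e.g.
  //
  //   unsigned CtxIndex = Info.EntryIndex;
  //   Context Ctx = Info.EntryContext;
  //   for (const Stmt *S : statements)
  //     Ctx = Map.getNextContext(CtxIndex, S, Ctx);
  //
  // BuildLockset (below) keeps a CtxIndex and LVarCtx for this purpose.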

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }
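
  // The distinction matters at merge points (editor's note): clearDefinition
  // maps a variable to the placeholder index 0 ("value unknown, name still
  // live"), while removeDefinition drops the name entirely.  E.g. after
  //
  //   if (b) x = 1; else x = 2;
  //
  // x stays in the context but points at index 0, since neither definition
  // dominates the join; see intersectContexts below.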

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}

// Computes the intersection of two contexts.  The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)                   // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != P.second)  // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C.  Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions.  C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    unsigned i1 = P.second;
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(P.first);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0;    // Mark this variable as undefined
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself.  At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, e.g.
//
//                       { Context                 | VarDefinitions }
// int x = 0;            { x -> x1                 | x1 = 0 }
// int y = 0;            { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
// if (b) x = 1;         { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
// else   x = 2;         { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
// ...                   { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm.  Those definitions that remain in the intersection are from
// blocks that strictly dominate the current block.  We do not bother to
// insert proper phi nodes, because they are not used in our analysis;
// instead, wherever a phi node would be required, we simply remove that
// definition from the context (e.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass.  Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals.  (These correspond to places where SSA
// might have to insert a phi node.)  On the second pass, these definitions
// are set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.)  E.g.
//
//                       { Context           | VarDefinitions }
// int x = 0, y = 0;     { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
// while (b)             { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//   x = x+1;            { x -> x3, y -> y1  | x3 = x2 + 1, ... }
// ...                   { y -> y1           | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge, skip it.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminator()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement.  Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor.  Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    }
  }
}

namespace {

class LockableFactEntry : public FactEntry {
private:
  /// managed by ScopedLockable object
  bool Managed;

public:
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    bool Mng = false, bool Asrt = false)
      : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!Managed && !asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler,
                  StringRef DiagKind) const override {
    Handler.handleDoubleLock(DiagKind, entry.toString(), entry.loc());
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove, ThreadSafetyHandler &Handler,
                    StringRef DiagKind) const override {
    FSet.removeLock(FactMan, Cp);
    if (!Cp.negative()) {
      FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }
};
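
// Source patterns that exercise LockableFactEntry (editor's illustration):
//
//   mu.Lock();
//   mu.Lock();    // handleLock fires: double acquire of 'mu'
//   mu.Unlock();  // handleUnlock replaces the fact 'mu' with '!mu'
//   mu.Unlock();  // 'mu' not in the set -> unmatched-unlock diagnostic
//                 // (reported from ThreadSafetyAnalyzer::removeLock below)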

class ScopedLockableFactEntry : public FactEntry {
private:
  SmallVector<const til::SExpr *, 4> UnderlyingMutexes;

public:
  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          const CapExprSet &Excl, const CapExprSet &Shrd)
      : FactEntry(CE, LK_Exclusive, Loc, false) {
    for (const auto &M : Excl)
      UnderlyingMutexes.push_back(M.sexpr());
    for (const auto &M : Shrd)
      UnderlyingMutexes.push_back(M.sexpr());
  }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
      if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(
            "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler,
                  StringRef DiagKind) const override {
    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
      CapabilityExpr UnderCp(UnderlyingMutex, false);

      // We're relocking the underlying mutexes.  Warn on double locking.
      if (FSet.findLock(FactMan, UnderCp)) {
        Handler.handleDoubleLock(DiagKind, UnderCp.toString(), entry.loc());
      } else {
        FSet.removeLock(FactMan, !UnderCp);
        FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                  UnderCp, entry.kind(), entry.loc()));
      }
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove, ThreadSafetyHandler &Handler,
                    StringRef DiagKind) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto *UnderlyingMutex : UnderlyingMutexes) {
      CapabilityExpr UnderCp(UnderlyingMutex, false);
      auto UnderEntry = llvm::make_unique<LockableFactEntry>(
          !UnderCp, LK_Exclusive, UnlockLoc);

      if (FullyRemove) {
        // We're destroying the managing object.
        // Remove the underlying mutex if it exists; but don't warn.
        if (FSet.findLock(FactMan, UnderCp)) {
          FSet.removeLock(FactMan, UnderCp);
          FSet.addLock(FactMan, std::move(UnderEntry));
        }
      } else {
        // We're releasing the underlying mutex, but not destroying the
        // managing object.  Warn on dual release.
        if (!FSet.findLock(FactMan, UnderCp)) {
          Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
                                        UnlockLoc);
        }
        FSet.removeLock(FactMan, UnderCp);
        FSet.addLock(FactMan, std::move(UnderEntry));
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }
};
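
// The usual source pattern behind ScopedLockableFactEntry (editor's
// illustration, using documentation-style macro spellings; MutexLock and
// guard are hypothetical names):
//
//   class SCOPED_CAPABILITY MutexLock {
//   public:
//     MutexLock(Mutex *mu) ACQUIRE(mu);
//     ~MutexLock()         RELEASE();
//   };
//
//   void f() {
//     MutexLock guard(&Mu);  // adds a scoped fact managing 'Mu'
//     ...
//   }                        // destructor: FullyRemove, releases 'Mu'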

/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const CXXMethodDecl *CurrentMethod;
  LocalVariableMap LocalVarMap;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
               StringRef DiagKind, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
                  StringRef DiagKind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, VarDecl *SelfDecl = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc,
                        LockErrorKind LEK1, LockErrorKind LEK2,
                        bool Modify=true);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc, LockErrorKind LEK1,
                        bool Modify=true) {
    intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
  }

  void runAnalysis(AnalysisDeclContext &AC);
};

} // namespace

/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of Vd.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (CycMap.find(Vd) == CycMap.end()) {
          CycMap.insert(std::make_pair(Vd, true));
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}
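
// A cycle that handleBeforeAfterCycle reports (editor's illustration,
// hypothetical identifiers):
//
//   Mutex Ma ACQUIRED_BEFORE(Mb);
//   Mutex Mb ACQUIRED_BEFORE(Ma);   // warning: cycle in the
//                                   // acquired_before/acquired_after graph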

/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

namespace {

template <typename Ty>
class has_arg_iterator_range {
  using yes = char[1];
  using no = char[2];

  template <typename Inner>
  static yes& test(Inner *I, decltype(I->args()) * = nullptr);

  template <typename>
  static no& test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};

} // namespace

static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
  return A->getName();
}

static StringRef ClassifyDiagnostic(QualType VDT) {
  // We need to look at the declaration of the type of the value to determine
  // which it is.  The type should either be a record or a typedef, or a
  // pointer or reference thereof.
  if (const auto *RT = VDT->getAs<RecordType>()) {
    if (const auto *RD = RT->getDecl())
      if (const auto *CA = RD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (const auto *TT = VDT->getAs<TypedefType>()) {
    if (const auto *TD = TT->getDecl())
      if (const auto *CA = TD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (VDT->isPointerType() || VDT->isReferenceType())
    return ClassifyDiagnostic(VDT->getPointeeType());

  return "mutex";
}

static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
  assert(VD && "No ValueDecl passed");

  // The ValueDecl is the declaration of a mutex or role (hopefully).
  return ClassifyDiagnostic(VD->getType());
}

template <typename AttrTy>
static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
                               StringRef>::type
ClassifyDiagnostic(const AttrTy *A) {
  if (const ValueDecl *VD = getValueDecl(A->getArg()))
    return ClassifyDiagnostic(VD);
  return "mutex";
}

template <typename AttrTy>
static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
                               StringRef>::type
ClassifyDiagnostic(const AttrTy *A) {
  for (const auto *Arg : A->args()) {
    if (const ValueDecl *VD = getValueDecl(Arg))
      return ClassifyDiagnostic(VD);
  }
  return "mutex";
}
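
// What ClassifyDiagnostic computes (editor's illustration): the "kind"
// string used in diagnostics comes from the capability attribute on the
// declaration's type, so
//
//   class __attribute__((capability("role"))) ThreadRole { ... };
//
// makes warnings read "...requires holding role ..." instead of the default
// "...requires holding mutex ...".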

bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  if (!CurrentMethod)
    return false;
  if (const auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
    const auto *VD = P->clangDecl();
    if (VD)
      return VD->getDeclContext() == CurrentMethod->getDeclContext();
  }
  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
                                   std::unique_ptr<FactEntry> Entry,
                                   StringRef DiagKind, bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      if (inCurrentScope(*Entry) && !Entry->asserted())
        Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (Handler.issueBetaWarnings() &&
      !Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), DiagKind);
  }

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind);
  } else {
    FSet.addLock(FactMan, std::move(Entry));
  }
}

/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind,
                                      StringRef DiagKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(),
                                      LDat->kind(), ReceivedKind, UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
                     DiagKind);
}

/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       VarDecl *SelfDecl) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
      return;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
      continue;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

/// Extract the list of mutexIDs from a trylock attribute.  If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false by the caller; it is set to true if
// the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond))
    return CallExp;
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *EWC = dyn_cast<ExprWithCleanups>(Cond))
    return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  }
  return nullptr;
}
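
// The source patterns getEdgeLockset handles (editor's illustration):
//
//   if (mu.TryLock()) {        // success edge: 'mu' added to the lockset
//     ...                      // failure edge: lockset unchanged
//   }
//
//   if (!mu.TryLock()) return; // negation flips which edge gets the lock;
//   ...                        // here 'mu' is held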

/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock.  The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  if (!Cond)
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
  StringRef CapDiagKind = "mutex";

  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttrs())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->attrs()) {
    switch (Attr->getKind()) {
    case attr::TryAcquireCapability: {
      auto *A = cast<TryAcquireCapabilityAttr>(Attr);
      getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                  Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
                  Negate);
      CapDiagKind = ClassifyDiagnostic(A);
      break;
    }
    case attr::ExclusiveTrylockFunction: {
      const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
      getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
                  PredBlock, CurrBlock, A->getSuccessValue(), Negate);
      CapDiagKind = ClassifyDiagnostic(A);
      break;
    }
    case attr::SharedTrylockFunction: {
      const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
      getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
                  PredBlock, CurrBlock, A->getSuccessValue(), Negate);
      CapDiagKind = ClassifyDiagnostic(A);
      break;
    }
    default:
      break;
    }
  }

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
                                                         LK_Exclusive, Loc),
            CapDiagKind);
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
                                                         LK_Shared, Loc),
            CapDiagKind);
}

namespace {

/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  FactSet FSet;
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // helper functions
  void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK,
                          StringRef DiagKind, SourceLocation Loc);
  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
                       StringRef DiagKind);

  void checkAccess(const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK = POK_VarAccess);
  void checkPtAccess(const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK = POK_VarAccess);

  void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
        LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}

  void VisitUnaryOperator(const UnaryOperator *UO);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCastExpr(const CastExpr *CE);
  void VisitCallExpr(const CallExpr *Exp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
  void VisitDeclStmt(const DeclStmt *S);
};

} // namespace

/// Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK,
                                      StringRef DiagKind, SourceLocation Loc) {
  LockKind LK = getLockKindFromAccessKind(AK);

  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
  if (Cp.isInvalid()) {
    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  if (Cp.negative()) {
    // Negative capabilities act like locks that are excluded
    const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
    if (LDat) {
      Analyzer->Handler.handleFunExcludesLock(
          DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
      return;
    }

    // If this does not refer to a negative capability in the same class,
    // then stop here.
    if (!Analyzer->inCurrentScope(Cp))
      return;

    // Otherwise the negative requirement must be propagated to the caller.
    LDat = FSet.findLock(Analyzer->FactMan, Cp);
    if (!LDat) {
      Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(),
                                           LK_Shared, Loc);
    }
    return;
  }

  const FactEntry *LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
  bool NoError = true;
  if (!LDat) {
    // No exact match found.  Look for a partial match.
    LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
    if (LDat) {
      // Warn that there's no precise match.
      std::string PartMatchStr = LDat->toString();
      StringRef PartMatchName(PartMatchStr);
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                           LK, Loc, &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                           LK, Loc);
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK)) {
    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                         LK, Loc);
  }
}

/// Warn if the LSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
                                   Expr *MutexExp, StringRef DiagKind) {
  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
  if (Cp.isInvalid()) {
    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp);
  if (LDat) {
    Analyzer->Handler.handleFunExcludesLock(
        DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
  }
}
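
// The pattern warnIfMutexHeld guards against (editor's illustration;
// EXCLUDES is the documentation-style spelling of locks_excluded):
//
//   void f() EXCLUDES(Mu);
//
//   void g() {
//     Mu.Lock();
//     f();          // warning: cannot call 'f' while 'Mu' is held
//     Mu.Unlock();
//   }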

/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
                               ProtectedOperationKind POK) {
  Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();

  SourceLocation Loc = Exp->getExprLoc();

  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
    const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        // Guard against self-initialization.  e.g., int &i = i;
        if (E == Exp)
          break;
        Exp = E;
        continue;
      }
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences
    if (UO->getOpcode() == UO_Deref)
      checkPtAccess(UO->getSubExpr(), AK, POK);
    return;
  }

  if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    checkPtAccess(AE->getLHS(), AK, POK);
    return;
  }

  if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
    if (ME->isArrow())
      checkPtAccess(ME->getBase(), AK, POK);
    else
      checkAccess(ME->getBase(), AK, POK);
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
  }

  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
                       ClassifyDiagnostic(I), Loc);
}
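
// The distinction checkAccess/checkPtAccess draws (editor's illustration):
//
//   int  a GUARDED_BY(Mu);     // 'a = 1;'  requires Mu  (checkAccess)
//   int *p PT_GUARDED_BY(Mu);  // '*p = 1;' requires Mu  (checkPtAccess);
//                              // 'p = q;'  by itself does not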
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
                                 ProtectedOperationKind POK) {
  while (true) {
    if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then its elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY.
        checkAccess(CE->getSubExpr(), AK, POK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  // Pass-by-reference warnings are under a different flag.
  ProtectedOperationKind PtPOK = POK_VarDereference;
  if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
    Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
                                        Exp->getExprLoc());

  for (const auto *I : D->specific_attrs<PtGuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
                       ClassifyDiagnostic(I), Exp->getExprLoc());
}

/// Process a function call, method call, constructor call,
/// or destructor call.  This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
///
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              VarDecl *VD) {
  SourceLocation Loc = Exp->getExprLoc();
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
  StringRef CapDiagKind = "mutex";

  // Figure out if we're constructing an object of a scoped lockable class.
  bool isScopedVar = false;
  if (VD) {
    if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) {
      const CXXRecordDecl *PD = CD->getParent();
      if (PD && PD->hasAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  for (const Attr *At : D->attrs()) {
    switch (At->getKind()) {
      // When we encounter a lock function, we need to add the lock to our
      // lockset.
      case attr::AcquireCapability: {
        const auto *A = cast<AcquireCapabilityAttr>(At);
        Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                            : ExclusiveLocksToAdd,
                              A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      // An assert will add a lock to the lockset, but will not generate
      // a warning if it is already there, and will not generate a warning
      // if it is not removed.
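      // For example (hypothetical client code, assuming the documented
      // ASSERT_EXCLUSIVE_LOCK macro), the assertion injects the capability
      // into the lockset for the rest of the function without requiring a
      // matching unlock:
      //
      //   void f() {
      //     mu.AssertHeld();  // annotated with ASSERT_EXCLUSIVE_LOCK()
      //     data = 1;         // OK: 'mu' is now assumed to be held.
      //   }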
      case attr::AssertExclusiveLock: {
        const auto *A = cast<AssertExclusiveLockAttr>(At);

        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock, LK_Exclusive, Loc, false, true),
                            ClassifyDiagnostic(A));
        break;
      }
      case attr::AssertSharedLock: {
        const auto *A = cast<AssertSharedLockAttr>(At);

        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock, LK_Shared, Loc, false, true),
                            ClassifyDiagnostic(A));
        break;
      }

      case attr::AssertCapability: {
        const auto *A = cast<AssertCapabilityAttr>(At);
        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock,
                                A->isShared() ? LK_Shared : LK_Exclusive, Loc,
                                false, true),
                            ClassifyDiagnostic(A));
        break;
      }

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::ReleaseCapability: {
        const auto *A = cast<ReleaseCapabilityAttr>(At);
        if (A->isGeneric())
          Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
        else if (A->isShared())
          Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
        else
          Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      case attr::RequiresCapability: {
        const auto *A = cast<RequiresCapabilityAttr>(At);
        for (auto *Arg : A->args()) {
          warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
                             POK_FunctionCall, ClassifyDiagnostic(A),
                             Exp->getExprLoc());
          // On a scoped lockable, a requires attribute means the constructor
          // adopts the (already held) lock.
          if (isScopedVar) {
            Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs
                                                : ScopedExclusiveReqs,
                                  A, Exp, D, VD);
          }
        }
        break;
      }

      case attr::LocksExcluded: {
        const auto *A = cast<LocksExcludedAttr>(At);
        for (auto *Arg : A->args())
          warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
        break;
      }

      // Ignore attributes unrelated to thread-safety.
      default:
        break;
    }
  }

  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);

  // Add locks.
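  // For example (hypothetical annotations, loosely following the documented
  // RELEASE_SHARED/ACQUIRE macros), an upgrade method both removes and adds:
  //
  //   void Upgrade() RELEASE_SHARED(mu) ACQUIRE(mu);
  //
  // Handling the removals above before the additions below keeps the lockset
  // consistent across such upgrade/downgrade calls.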
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                M, LK_Exclusive, Loc, isScopedVar),
                      CapDiagKind);
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                M, LK_Shared, Loc, isScopedVar),
                      CapDiagKind);

  if (isScopedVar) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
    // FIXME: does this store a pointer to DRE?
    CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);

    std::copy(ScopedExclusiveReqs.begin(), ScopedExclusiveReqs.end(),
              std::back_inserter(ExclusiveLocksToAdd));
    std::copy(ScopedSharedReqs.begin(), ScopedSharedReqs.end(),
              std::back_inserter(SharedLocksToAdd));
    Analyzer->addLock(FSet,
                      llvm::make_unique<ScopedLockableFactEntry>(
                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
                      CapDiagKind);
  }
}

/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes.  Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case UO_PostDec:
    case UO_PostInc:
    case UO_PreDec:
    case UO_PreInc:
      checkAccess(UO->getSubExpr(), AK_Written);
      break;
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  // Adjust the context.
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);

  checkAccess(BO->getLHS(), AK_Written);
}

/// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
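/// For example (a hypothetical snippet), the initializer below loads 'data'
/// through an lvalue-to-rvalue cast, and is therefore checked as a read:
/// \code
///   int x = data;  // requires at least a shared lock on data's mutex
/// \endcode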
void BuildLockset::VisitCastExpr(const CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  checkAccess(CE->getSubExpr(), AK_Read);
}

void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
  bool ExamineArgs = true;
  bool OperatorFun = false;

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer.
    const CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        if (MD->isConst())
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        else // FIXME -- should be AK_Written
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
      } else {
        if (MD->isConst())
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
        else // FIXME -- should be AK_Written
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }
  } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    OperatorFun = true;

    auto OEop = OE->getOperator();
    switch (OEop) {
      case OO_Equal: {
        ExamineArgs = false;
        const Expr *Target = OE->getArg(0);
        const Expr *Source = OE->getArg(1);
        checkAccess(Target, AK_Written);
        checkAccess(Source, AK_Read);
        break;
      }
      case OO_Star:
      case OO_Arrow:
      case OO_Subscript: {
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
          // Grrr.  operator* can be multiplication...
          checkPtAccess(Obj, AK_Read);
        }
        break;
      }
      default: {
        // TODO: get rid of this, and rely on pass-by-ref instead.
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        break;
      }
    }
  }

  if (ExamineArgs) {
    if (const FunctionDecl *FD = Exp->getDirectCallee()) {
      // NO_THREAD_SAFETY_ANALYSIS does double duty here.  Normally it
      // only turns off checking within the body of a function, but we also
      // use it to turn off checking in arguments to the function.  This
      // could result in some false negatives, but the alternative is to
      // create yet another attribute.
      if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
        unsigned Fn = FD->getNumParams();
        unsigned Cn = Exp->getNumArgs();
        unsigned Skip = 0;

        unsigned i = 0;
        if (OperatorFun) {
          if (isa<CXXMethodDecl>(FD)) {
            // The first arg in an operator call is the implicit self
            // argument, and doesn't appear in the FunctionDecl.
            Skip = 1;
            Cn--;
          } else {
            // Ignore the first argument of operators; it's been checked above.
            i = 1;
          }
        }
        // Ignore default arguments.
        unsigned n = (Fn < Cn) ? Fn : Cn;

        for (; i < n; ++i) {
          const ParmVarDecl *Pvd = FD->getParamDecl(i);
          const Expr *Arg = Exp->getArg(i + Skip);
          QualType Qt = Pvd->getType();
          if (Qt->isReferenceType())
            checkAccess(Arg, AK_Read, POK_PassByRef);
        }
      }
    }
  }

  auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!D || !D->hasAttrs())
    return;
  handleCall(Exp, D);
}

void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
  const CXXConstructorDecl *D = Exp->getConstructor();
  if (D && D->isCopyConstructor()) {
    const Expr *Source = Exp->getArg(0);
    checkAccess(Source, AK_Read);
  }
  // FIXME -- only handles constructors in DeclStmt below.
}

static CXXConstructorDecl *
findConstructorForByValueReturn(const CXXRecordDecl *RD) {
  // Prefer a move constructor over a copy constructor. If there's more than
  // one copy constructor or more than one move constructor, we arbitrarily
  // pick the first declared such constructor rather than trying to guess which
  // one is more appropriate.
  CXXConstructorDecl *CopyCtor = nullptr;
  for (auto *Ctor : RD->ctors()) {
    if (Ctor->isDeleted())
      continue;
    if (Ctor->isMoveConstructor())
      return Ctor;
    if (!CopyCtor && Ctor->isCopyConstructor())
      CopyCtor = Ctor;
  }
  return CopyCtor;
}

static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
                               SourceLocation Loc) {
  ASTContext &Ctx = CD->getASTContext();
  return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc,
                                  CD, true, Args, false, false, false, false,
                                  CXXConstructExpr::CK_Complete,
                                  SourceRange(Loc, Loc));
}

void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
  // Adjust the context.
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  for (auto *D : S->getDeclGroup()) {
    if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      Expr *E = VD->getInit();
      if (!E)
        continue;
      E = E->IgnoreParens();

      // Handle constructors that involve temporaries.
      if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
        E = EWC->getSubExpr();
      if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
        E = BTE->getSubExpr();

      if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
        const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(E, CtorD, VD);
      } else if (isa<CallExpr>(E) && E->isRValue()) {
        // If the object is initialized by a function call that returns a
        // scoped lockable by value, use the attributes on the copy or move
        // constructor to figure out what effect that should have on the
        // lockset.
        // FIXME: Is this really the best way to handle this situation?
        auto *RD = E->getType()->getAsCXXRecordDecl();
        if (!RD || !RD->hasAttr<ScopedLockableAttr>())
          continue;
        CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(buildFakeCtorCall(CtorD, {E}, E->getBeginLoc()), CtorD, VD);
      }
    }
  }
}

/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the
/// lockset of each branch being merged.
/// For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and
/// C are the same. In the event of a difference, we use the intersection of
/// these two locksets at the start of D.
///
/// \param FSet1 The first lockset.
/// \param FSet2 The second lockset.
/// \param JoinLoc The location of the join point for error reporting.
/// \param LEK1 The error message to report if a mutex is missing from FSet1.
/// \param LEK2 The error message to report if a mutex is missing from FSet2.
/// \param Modify True if FSet1 should be updated to the intersection.
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
                                            const FactSet &FSet2,
                                            SourceLocation JoinLoc,
                                            LockErrorKind LEK1,
                                            LockErrorKind LEK2,
                                            bool Modify) {
  FactSet FSet1Orig = FSet1;

  // Find locks in FSet2 that conflict or are not in FSet1, and warn.
  for (const auto &Fact : FSet2) {
    const FactEntry *LDat1 = nullptr;
    const FactEntry *LDat2 = &FactMan[Fact];
    FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
    if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];

    if (LDat1) {
      if (LDat1->kind() != LDat2->kind()) {
        Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
                                         LDat2->loc(), LDat1->loc());
        if (Modify && LDat1->kind() != LK_Exclusive) {
          // Take the exclusive lock, which is the one in FSet2.
          *Iter1 = Fact;
        }
      } else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
        // The non-asserted lock in FSet2 is the one we want to track.
        *Iter1 = Fact;
      }
    } else {
      LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
                                           Handler);
    }
  }

  // Find locks in FSet1 that are not in FSet2, and remove them.
  for (const auto &Fact : FSet1Orig) {
    const FactEntry *LDat1 = &FactMan[Fact];
    const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);

    if (!LDat2) {
      LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
                                           Handler);
      if (Modify)
        FSet1.removeLock(FactMan, *LDat1);
    }
  }
}

// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
  if (B->hasNoReturnElement())
    return true;
  if (B->empty())
    return false;

  CFGElement Last = B->back();
  if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
    if (isa<CXXThrowExpr>(S->getStmt()))
      return true;
  }
  return false;
}

/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs to be rewritten as a visitor for
  // CFGWalker.  For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
  CurrentMethod = dyn_cast<CXXMethodDecl>(D);

  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code.  Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access are disabled, but we
  // should still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.

  Handler.enterFunction(CurrentFunction);

  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  // Mark the entry block as reachable.
  BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;

  // Compute SSA names for local variables.
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to the initial lockset.  Also turn off checking for lock and unlock
  // functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;
    StringRef CapDiagKind = "mutex";

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock
        // implementation.  We must ignore such methods.
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc);
      Entry->setDeclared(true);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc);
      Entry->setDeclared(true);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
  }

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in the
    // future for a lock which is not in the intersection, but was in the
    // union. We may want to also keep the union in the future. As an example,
    // let's say the intersection contains Mutex L, and the union contains L
    // and M. Later we unlock M. At this point, we would output an error
    // because we never locked M; although the real error is probably that we
    // forgot to lock M on all code paths. Conversely, let's say that later
    // we lock M. In this case, we should compare against the intersection
    // instead of the union because the real error is probably that we forgot
    // to unlock M on all code paths.
    bool LocksetInitialized = false;
    SmallVector<CFGBlock *, 8> SpecialBlocks;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If the edge *PI -> CurrBlock is a back edge, the predecessor has not
      // been visited yet in the topological order; skip it.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      // If the previous block ended in a 'continue' or 'break' statement, then
      // a difference in locksets is probably due to a bug in that block, rather
      // than in some other predecessor. In that case, keep the other
      // predecessor's lockset.
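      // For example (hypothetical):
      //
      //   while (cond) {
      //     mu.Lock();
      //     if (done) break;  // exits the loop while holding 'mu'
      //     mu.Unlock();
      //   }
      //
      // The 'break' edge carries 'mu' into the block after the loop, so the
      // deferred handling below blames that edge rather than the normal exit.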
      if (const Stmt *Terminator = (*PI)->getTerminator()) {
        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
          SpecialBlocks.push_back(*PI);
          continue;
        }
      }

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         CurrBlockInfo->EntryLoc,
                         LEK_LockedSomePredecessors);
      }
    }

    // Skip the rest of the block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    // Process continue and break blocks. Assume that the lockset for the
    // resulting block is unaffected by any discrepancies in them.
    for (const auto *PrevBlock : SpecialBlocks) {
      unsigned PrevBlockID = PrevBlock->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Determine whether this edge is a loop terminator for diagnostic
        // purposes.  FIXME: A 'break' statement might be a loop terminator,
        // but it might also be part of a switch.  Also, a subsequent
        // destructor might add to the lockset, in which case the real issue
        // might be a double lock on the other path.
        const Stmt *Terminator = PrevBlock->getTerminator();
        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);

        FactSet PrevLockset;
        getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
                       PrevBlock, CurrBlock);

        // Do not update EntrySet.
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         PrevBlockInfo->ExitLoc,
                         IsLoop ? LEK_LockedSomeLoopIterations
                                : LEK_LockedSomePredecessors,
                         false);
      }
    }

    BuildLockset LocksetBuilder(this, *CurrBlockInfo);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI.castAs<CFGStmt>();
          LocksetBuilder.Visit(CS.getStmt());
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
          const auto *DD = AD.getDestructorDecl(AC.getASTContext());
          if (!DD->hasAttrs())
            break;

          // Create a dummy expression.
          auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
          DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
                          VK_LValue, AD.getTriggerStmt()->getEndLoc());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another
    // block (FirstLoopBlock), we need to check that the lockset of CurrBlock
    // is equal to the one held at the beginning of FirstLoopBlock.  We can
    // look up the lockset held at the beginning of FirstLoopBlock in the
    // EntryLockSets map.
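    // For example (hypothetical):
    //
    //   while (cond) {
    //     mu.Lock();  // 'mu' is held at the back edge, but not at loop entry
    //   }
    //
    // Comparing the exit lockset against FirstLoopBlock's entry lockset
    // catches locks that are held on some loop iterations but not others.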
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If the edge CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
                       PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations,
                       false);
    }
  }

  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];

  // Skip the final check if the exit block is unreachable.
  if (!Final->Reachable)
    return;

  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedExitSet = Initial->EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION.  The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                         Lock, LK_Exclusive, D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                         Lock, LK_Shared, D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedExitSet.removeLock(FactMan, Lock);

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedExitSet, Final->ExitSet,
                   Final->ExitLoc,
                   LEK_LockedAtEndOfFunction,
                   LEK_NotLockedAtEndOfFunction,
                   false);

  Handler.leaveFunction(CurrentFunction);
}

/// Entry point for the thread safety analysis: construct a
/// ThreadSafetyAnalyzer (reusing the BeforeSet cache across functions) and
/// run it over the given function's CFG, reporting diagnostics through
/// \p Handler.
void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                                           ThreadSafetyHandler &Handler,
                                           BeforeSet **BSet) {
  if (!*BSet)
    *BSet = new BeforeSet;
  ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
  Analyzer.runAnalysis(AC);
}

void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }

/// Helper function that returns a LockKind required for the given level
/// of access.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read:
      return LK_Shared;
    case AK_Written:
      return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}