//===- ThreadSafety.cpp ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Kind, Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push CapE onto the list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    iterator It = std::find_if(begin(), end(),
                               [=](const CapabilityExpr &CapE2) {
      return CapE.equals(CapE2);
    });
    if (It == end())
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution.
Currently, a fact is a capability, 103 /// along with additional information, such as where it was acquired, whether 104 /// it is exclusive or shared, etc. 105 /// 106 /// FIXME: this analysis does not currently support re-entrant locking. 107 class FactEntry : public CapabilityExpr { 108 private: 109 /// Exclusive or shared. 110 LockKind LKind; 111 112 /// Where it was acquired. 113 SourceLocation AcquireLoc; 114 115 /// True if the lock was asserted. 116 bool Asserted; 117 118 /// True if the lock was declared. 119 bool Declared; 120 121 public: 122 FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc, 123 bool Asrt, bool Declrd = false) 124 : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt), 125 Declared(Declrd) {} 126 virtual ~FactEntry() = default; 127 128 LockKind kind() const { return LKind; } 129 SourceLocation loc() const { return AcquireLoc; } 130 bool asserted() const { return Asserted; } 131 bool declared() const { return Declared; } 132 133 void setDeclared(bool D) { Declared = D; } 134 135 virtual void 136 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 137 SourceLocation JoinLoc, LockErrorKind LEK, 138 ThreadSafetyHandler &Handler) const = 0; 139 virtual void handleLock(FactSet &FSet, FactManager &FactMan, 140 const FactEntry &entry, ThreadSafetyHandler &Handler, 141 StringRef DiagKind) const = 0; 142 virtual void handleUnlock(FactSet &FSet, FactManager &FactMan, 143 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 144 bool FullyRemove, ThreadSafetyHandler &Handler, 145 StringRef DiagKind) const = 0; 146 147 // Return true if LKind >= LK, where exclusive > shared 148 bool isAtLeast(LockKind LK) const { 149 return (LKind == LK_Exclusive) || (LK == LK_Shared); 150 } 151 }; 152 153 using FactID = unsigned short; 154 155 /// FactManager manages the memory for all facts that are created during 156 /// the analysis of a single routine. 157 class FactManager { 158 private: 159 std::vector<std::unique_ptr<const FactEntry>> Facts; 160 161 public: 162 FactID newFact(std::unique_ptr<FactEntry> Entry) { 163 Facts.push_back(std::move(Entry)); 164 return static_cast<unsigned short>(Facts.size() - 1); 165 } 166 167 const FactEntry &operator[](FactID F) const { return *Facts[F]; } 168 }; 169 170 /// A FactSet is the set of facts that are known to be true at a 171 /// particular program point. FactSets must be small, because they are 172 /// frequently copied, and are thus implemented as a set of indices into a 173 /// table maintained by a FactManager. A typical FactSet only holds 1 or 2 174 /// locks, so we can get away with doing a linear search for lookup. Note 175 /// that a hashtable or map is inappropriate in this case, because lookups 176 /// may involve partial pattern matches, rather than exact matches. 
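///
/// A FactSet may also contain "negative" facts such as "!mu", which record
/// that a capability is known not to be held; for most purposes a set that
/// holds only negative facts is treated as empty (see the second isEmpty
/// overload below).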
177 class FactSet { 178 private: 179 using FactVec = SmallVector<FactID, 4>; 180 181 FactVec FactIDs; 182 183 public: 184 using iterator = FactVec::iterator; 185 using const_iterator = FactVec::const_iterator; 186 187 iterator begin() { return FactIDs.begin(); } 188 const_iterator begin() const { return FactIDs.begin(); } 189 190 iterator end() { return FactIDs.end(); } 191 const_iterator end() const { return FactIDs.end(); } 192 193 bool isEmpty() const { return FactIDs.size() == 0; } 194 195 // Return true if the set contains only negative facts 196 bool isEmpty(FactManager &FactMan) const { 197 for (const auto FID : *this) { 198 if (!FactMan[FID].negative()) 199 return false; 200 } 201 return true; 202 } 203 204 void addLockByID(FactID ID) { FactIDs.push_back(ID); } 205 206 FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) { 207 FactID F = FM.newFact(std::move(Entry)); 208 FactIDs.push_back(F); 209 return F; 210 } 211 212 bool removeLock(FactManager& FM, const CapabilityExpr &CapE) { 213 unsigned n = FactIDs.size(); 214 if (n == 0) 215 return false; 216 217 for (unsigned i = 0; i < n-1; ++i) { 218 if (FM[FactIDs[i]].matches(CapE)) { 219 FactIDs[i] = FactIDs[n-1]; 220 FactIDs.pop_back(); 221 return true; 222 } 223 } 224 if (FM[FactIDs[n-1]].matches(CapE)) { 225 FactIDs.pop_back(); 226 return true; 227 } 228 return false; 229 } 230 231 iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) { 232 return std::find_if(begin(), end(), [&](FactID ID) { 233 return FM[ID].matches(CapE); 234 }); 235 } 236 237 const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const { 238 auto I = std::find_if(begin(), end(), [&](FactID ID) { 239 return FM[ID].matches(CapE); 240 }); 241 return I != end() ? &FM[*I] : nullptr; 242 } 243 244 const FactEntry *findLockUniv(FactManager &FM, 245 const CapabilityExpr &CapE) const { 246 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 247 return FM[ID].matchesUniv(CapE); 248 }); 249 return I != end() ? &FM[*I] : nullptr; 250 } 251 252 const FactEntry *findPartialMatch(FactManager &FM, 253 const CapabilityExpr &CapE) const { 254 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 255 return FM[ID].partiallyMatches(CapE); 256 }); 257 return I != end() ? 
&FM[*I] : nullptr; 258 } 259 260 bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const { 261 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 262 return FM[ID].valueDecl() == Vd; 263 }); 264 return I != end(); 265 } 266 }; 267 268 class ThreadSafetyAnalyzer; 269 270 } // namespace 271 272 namespace clang { 273 namespace threadSafety { 274 275 class BeforeSet { 276 private: 277 using BeforeVect = SmallVector<const ValueDecl *, 4>; 278 279 struct BeforeInfo { 280 BeforeVect Vect; 281 int Visited = 0; 282 283 BeforeInfo() = default; 284 BeforeInfo(BeforeInfo &&) = default; 285 }; 286 287 using BeforeMap = 288 llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>; 289 using CycleMap = llvm::DenseMap<const ValueDecl *, bool>; 290 291 public: 292 BeforeSet() = default; 293 294 BeforeInfo* insertAttrExprs(const ValueDecl* Vd, 295 ThreadSafetyAnalyzer& Analyzer); 296 297 BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd, 298 ThreadSafetyAnalyzer &Analyzer); 299 300 void checkBeforeAfter(const ValueDecl* Vd, 301 const FactSet& FSet, 302 ThreadSafetyAnalyzer& Analyzer, 303 SourceLocation Loc, StringRef CapKind); 304 305 private: 306 BeforeMap BMap; 307 CycleMap CycMap; 308 }; 309 310 } // namespace threadSafety 311 } // namespace clang 312 313 namespace { 314 315 class LocalVariableMap; 316 317 using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>; 318 319 /// A side (entry or exit) of a CFG node. 320 enum CFGBlockSide { CBS_Entry, CBS_Exit }; 321 322 /// CFGBlockInfo is a struct which contains all the information that is 323 /// maintained for each block in the CFG. See LocalVariableMap for more 324 /// information about the contexts. 325 struct CFGBlockInfo { 326 // Lockset held at entry to block 327 FactSet EntrySet; 328 329 // Lockset held at exit from block 330 FactSet ExitSet; 331 332 // Context held at entry to block 333 LocalVarContext EntryContext; 334 335 // Context held at exit from block 336 LocalVarContext ExitContext; 337 338 // Location of first statement in block 339 SourceLocation EntryLoc; 340 341 // Location of last statement in block. 342 SourceLocation ExitLoc; 343 344 // Used to replay contexts later 345 unsigned EntryIndex; 346 347 // Is this block reachable? 348 bool Reachable = false; 349 350 const FactSet &getSet(CFGBlockSide Side) const { 351 return Side == CBS_Entry ? EntrySet : ExitSet; 352 } 353 354 SourceLocation getLocation(CFGBlockSide Side) const { 355 return Side == CBS_Entry ? EntryLoc : ExitLoc; 356 } 357 358 private: 359 CFGBlockInfo(LocalVarContext EmptyCtx) 360 : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {} 361 362 public: 363 static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M); 364 }; 365 366 // A LocalVariableMap maintains a map from local variables to their currently 367 // valid definitions. It provides SSA-like functionality when traversing the 368 // CFG. Like SSA, each definition or assignment to a variable is assigned a 369 // unique name (an integer), which acts as the SSA name for that definition. 370 // The total set of names is shared among all CFG basic blocks. 371 // Unlike SSA, we do not rewrite expressions to replace local variables declrefs 372 // with their SSA-names. Instead, we compute a Context for each point in the 373 // code, which maps local variables to the appropriate SSA-name. This map 374 // changes with each assignment. 375 // 376 // The map is computed in a single pass over the CFG. 
Subsequent analyses can 377 // then query the map to find the appropriate Context for a statement, and use 378 // that Context to look up the definitions of variables. 379 class LocalVariableMap { 380 public: 381 using Context = LocalVarContext; 382 383 /// A VarDefinition consists of an expression, representing the value of the 384 /// variable, along with the context in which that expression should be 385 /// interpreted. A reference VarDefinition does not itself contain this 386 /// information, but instead contains a pointer to a previous VarDefinition. 387 struct VarDefinition { 388 public: 389 friend class LocalVariableMap; 390 391 // The original declaration for this variable. 392 const NamedDecl *Dec; 393 394 // The expression for this variable, OR 395 const Expr *Exp = nullptr; 396 397 // Reference to another VarDefinition 398 unsigned Ref = 0; 399 400 // The map with which Exp should be interpreted. 401 Context Ctx; 402 403 bool isReference() { return !Exp; } 404 405 private: 406 // Create ordinary variable definition 407 VarDefinition(const NamedDecl *D, const Expr *E, Context C) 408 : Dec(D), Exp(E), Ctx(C) {} 409 410 // Create reference to previous definition 411 VarDefinition(const NamedDecl *D, unsigned R, Context C) 412 : Dec(D), Ref(R), Ctx(C) {} 413 }; 414 415 private: 416 Context::Factory ContextFactory; 417 std::vector<VarDefinition> VarDefinitions; 418 std::vector<unsigned> CtxIndices; 419 std::vector<std::pair<const Stmt *, Context>> SavedContexts; 420 421 public: 422 LocalVariableMap() { 423 // index 0 is a placeholder for undefined variables (aka phi-nodes). 424 VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext())); 425 } 426 427 /// Look up a definition, within the given context. 428 const VarDefinition* lookup(const NamedDecl *D, Context Ctx) { 429 const unsigned *i = Ctx.lookup(D); 430 if (!i) 431 return nullptr; 432 assert(*i < VarDefinitions.size()); 433 return &VarDefinitions[*i]; 434 } 435 436 /// Look up the definition for D within the given context. Returns 437 /// NULL if the expression is not statically known. If successful, also 438 /// modifies Ctx to hold the context of the return Expr. 439 const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) { 440 const unsigned *P = Ctx.lookup(D); 441 if (!P) 442 return nullptr; 443 444 unsigned i = *P; 445 while (i > 0) { 446 if (VarDefinitions[i].Exp) { 447 Ctx = VarDefinitions[i].Ctx; 448 return VarDefinitions[i].Exp; 449 } 450 i = VarDefinitions[i].Ref; 451 } 452 return nullptr; 453 } 454 455 Context getEmptyContext() { return ContextFactory.getEmptyMap(); } 456 457 /// Return the next context after processing S. This function is used by 458 /// clients of the class to get the appropriate context when traversing the 459 /// CFG. It must be called for every assignment or DeclStmt. 460 Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) { 461 if (SavedContexts[CtxIndex+1].first == S) { 462 CtxIndex++; 463 Context Result = SavedContexts[CtxIndex].second; 464 return Result; 465 } 466 return C; 467 } 468 469 void dumpVarDefinitionName(unsigned i) { 470 if (i == 0) { 471 llvm::errs() << "Undefined"; 472 return; 473 } 474 const NamedDecl *Dec = VarDefinitions[i].Dec; 475 if (!Dec) { 476 llvm::errs() << "<<NULL>>"; 477 return; 478 } 479 Dec->printName(llvm::errs()); 480 llvm::errs() << "." 
                 << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
585 CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) { 586 return CFGBlockInfo(M.getEmptyContext()); 587 } 588 589 namespace { 590 591 /// Visitor which builds a LocalVariableMap 592 class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> { 593 public: 594 LocalVariableMap* VMap; 595 LocalVariableMap::Context Ctx; 596 597 VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C) 598 : VMap(VM), Ctx(C) {} 599 600 void VisitDeclStmt(const DeclStmt *S); 601 void VisitBinaryOperator(const BinaryOperator *BO); 602 }; 603 604 } // namespace 605 606 // Add new local variables to the variable map 607 void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) { 608 bool modifiedCtx = false; 609 const DeclGroupRef DGrp = S->getDeclGroup(); 610 for (const auto *D : DGrp) { 611 if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) { 612 const Expr *E = VD->getInit(); 613 614 // Add local variables with trivial type to the variable map 615 QualType T = VD->getType(); 616 if (T.isTrivialType(VD->getASTContext())) { 617 Ctx = VMap->addDefinition(VD, E, Ctx); 618 modifiedCtx = true; 619 } 620 } 621 } 622 if (modifiedCtx) 623 VMap->saveContext(S, Ctx); 624 } 625 626 // Update local variable definitions in variable map 627 void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) { 628 if (!BO->isAssignmentOp()) 629 return; 630 631 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts(); 632 633 // Update the variable map and current context. 634 if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) { 635 const ValueDecl *VDec = DRE->getDecl(); 636 if (Ctx.lookup(VDec)) { 637 if (BO->getOpcode() == BO_Assign) 638 Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx); 639 else 640 // FIXME -- handle compound assignment operators 641 Ctx = VMap->clearDefinition(VDec, Ctx); 642 VMap->saveContext(BO, Ctx); 643 } 644 } 645 } 646 647 // Computes the intersection of two contexts. The intersection is the 648 // set of variables which have the same definition in both contexts; 649 // variables with different definitions are discarded. 650 LocalVariableMap::Context 651 LocalVariableMap::intersectContexts(Context C1, Context C2) { 652 Context Result = C1; 653 for (const auto &P : C1) { 654 const NamedDecl *Dec = P.first; 655 const unsigned *i2 = C2.lookup(Dec); 656 if (!i2) // variable doesn't exist on second path 657 Result = removeDefinition(Dec, Result); 658 else if (*i2 != P.second) // variable exists, but has different definition 659 Result = clearDefinition(Dec, Result); 660 } 661 return Result; 662 } 663 664 // For every variable in C, create a new variable that refers to the 665 // definition in C. Return a new context that contains these new variables. 666 // (We use this for a naive implementation of SSA on loop back-edges.) 667 LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) { 668 Context Result = getEmptyContext(); 669 for (const auto &P : C) 670 Result = addReference(P.first, P.second, Result); 671 return Result; 672 } 673 674 // This routine also takes the intersection of C1 and C2, but it does so by 675 // altering the VarDefinitions. C1 must be the result of an earlier call to 676 // createReferenceContext. 
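//
// Illustrative sketch (definition names are hypothetical): if the loop-entry
// context maps "x" to a reference definition x2 created by
// createReferenceContext, and the context at the bottom of the loop maps "x"
// to some other definition x3, then x2.Ref is reset to 0, marking x as
// undefined inside the loop -- the equivalent of inserting a phi node.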
677 void LocalVariableMap::intersectBackEdge(Context C1, Context C2) { 678 for (const auto &P : C1) { 679 unsigned i1 = P.second; 680 VarDefinition *VDef = &VarDefinitions[i1]; 681 assert(VDef->isReference()); 682 683 const unsigned *i2 = C2.lookup(P.first); 684 if (!i2 || (*i2 != i1)) 685 VDef->Ref = 0; // Mark this variable as undefined 686 } 687 } 688 689 // Traverse the CFG in topological order, so all predecessors of a block 690 // (excluding back-edges) are visited before the block itself. At 691 // each point in the code, we calculate a Context, which holds the set of 692 // variable definitions which are visible at that point in execution. 693 // Visible variables are mapped to their definitions using an array that 694 // contains all definitions. 695 // 696 // At join points in the CFG, the set is computed as the intersection of 697 // the incoming sets along each edge, E.g. 698 // 699 // { Context | VarDefinitions } 700 // int x = 0; { x -> x1 | x1 = 0 } 701 // int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 } 702 // if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... } 703 // else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... } 704 // ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... } 705 // 706 // This is essentially a simpler and more naive version of the standard SSA 707 // algorithm. Those definitions that remain in the intersection are from blocks 708 // that strictly dominate the current block. We do not bother to insert proper 709 // phi nodes, because they are not used in our analysis; instead, wherever 710 // a phi node would be required, we simply remove that definition from the 711 // context (E.g. x above). 712 // 713 // The initial traversal does not capture back-edges, so those need to be 714 // handled on a separate pass. Whenever the first pass encounters an 715 // incoming back edge, it duplicates the context, creating new definitions 716 // that refer back to the originals. (These correspond to places where SSA 717 // might have to insert a phi node.) On the second pass, these definitions are 718 // set to NULL if the variable has changed on the back-edge (i.e. a phi 719 // node was actually required.) E.g. 720 // 721 // { Context | VarDefinitions } 722 // int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 } 723 // while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; } 724 // x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... } 725 // ... { y -> y1 | x3 = 2, x2 = 1, ... 
} 726 void LocalVariableMap::traverseCFG(CFG *CFGraph, 727 const PostOrderCFGView *SortedGraph, 728 std::vector<CFGBlockInfo> &BlockInfo) { 729 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph); 730 731 CtxIndices.resize(CFGraph->getNumBlockIDs()); 732 733 for (const auto *CurrBlock : *SortedGraph) { 734 unsigned CurrBlockID = CurrBlock->getBlockID(); 735 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID]; 736 737 VisitedBlocks.insert(CurrBlock); 738 739 // Calculate the entry context for the current block 740 bool HasBackEdges = false; 741 bool CtxInit = true; 742 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), 743 PE = CurrBlock->pred_end(); PI != PE; ++PI) { 744 // if *PI -> CurrBlock is a back edge, so skip it 745 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) { 746 HasBackEdges = true; 747 continue; 748 } 749 750 unsigned PrevBlockID = (*PI)->getBlockID(); 751 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; 752 753 if (CtxInit) { 754 CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext; 755 CtxInit = false; 756 } 757 else { 758 CurrBlockInfo->EntryContext = 759 intersectContexts(CurrBlockInfo->EntryContext, 760 PrevBlockInfo->ExitContext); 761 } 762 } 763 764 // Duplicate the context if we have back-edges, so we can call 765 // intersectBackEdges later. 766 if (HasBackEdges) 767 CurrBlockInfo->EntryContext = 768 createReferenceContext(CurrBlockInfo->EntryContext); 769 770 // Create a starting context index for the current block 771 saveContext(nullptr, CurrBlockInfo->EntryContext); 772 CurrBlockInfo->EntryIndex = getContextIndex(); 773 774 // Visit all the statements in the basic block. 775 VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext); 776 for (const auto &BI : *CurrBlock) { 777 switch (BI.getKind()) { 778 case CFGElement::Statement: { 779 CFGStmt CS = BI.castAs<CFGStmt>(); 780 VMapBuilder.Visit(CS.getStmt()); 781 break; 782 } 783 default: 784 break; 785 } 786 } 787 CurrBlockInfo->ExitContext = VMapBuilder.Ctx; 788 789 // Mark variables on back edges as "unknown" if they've been changed. 790 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), 791 SE = CurrBlock->succ_end(); SI != SE; ++SI) { 792 // if CurrBlock -> *SI is *not* a back edge 793 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI)) 794 continue; 795 796 CFGBlock *FirstLoopBlock = *SI; 797 Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext; 798 Context LoopEnd = CurrBlockInfo->ExitContext; 799 intersectBackEdge(LoopBegin, LoopEnd); 800 } 801 } 802 803 // Put an extra entry at the end of the indexed context array 804 unsigned exitID = CFGraph->getExit().getBlockID(); 805 saveContext(nullptr, BlockInfo[exitID].ExitContext); 806 } 807 808 /// Find the appropriate source locations to use when producing diagnostics for 809 /// each block in the CFG. 810 static void findBlockLocations(CFG *CFGraph, 811 const PostOrderCFGView *SortedGraph, 812 std::vector<CFGBlockInfo> &BlockInfo) { 813 for (const auto *CurrBlock : *SortedGraph) { 814 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()]; 815 816 // Find the source location of the last statement in the block, if the 817 // block is not empty. 818 if (const Stmt *S = CurrBlock->getTerminator()) { 819 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc(); 820 } else { 821 for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(), 822 BE = CurrBlock->rend(); BI != BE; ++BI) { 823 // FIXME: Handle other CFGElement kinds. 
824 if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) { 825 CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc(); 826 break; 827 } 828 } 829 } 830 831 if (CurrBlockInfo->ExitLoc.isValid()) { 832 // This block contains at least one statement. Find the source location 833 // of the first statement in the block. 834 for (const auto &BI : *CurrBlock) { 835 // FIXME: Handle other CFGElement kinds. 836 if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) { 837 CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc(); 838 break; 839 } 840 } 841 } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() && 842 CurrBlock != &CFGraph->getExit()) { 843 // The block is empty, and has a single predecessor. Use its exit 844 // location. 845 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = 846 BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc; 847 } 848 } 849 } 850 851 namespace { 852 853 class LockableFactEntry : public FactEntry { 854 private: 855 /// managed by ScopedLockable object 856 bool Managed; 857 858 public: 859 LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc, 860 bool Mng = false, bool Asrt = false) 861 : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {} 862 863 void 864 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 865 SourceLocation JoinLoc, LockErrorKind LEK, 866 ThreadSafetyHandler &Handler) const override { 867 if (!Managed && !asserted() && !negative() && !isUniversal()) { 868 Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc, 869 LEK); 870 } 871 } 872 873 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry, 874 ThreadSafetyHandler &Handler, 875 StringRef DiagKind) const override { 876 Handler.handleDoubleLock(DiagKind, entry.toString(), entry.loc()); 877 } 878 879 void handleUnlock(FactSet &FSet, FactManager &FactMan, 880 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 881 bool FullyRemove, ThreadSafetyHandler &Handler, 882 StringRef DiagKind) const override { 883 FSet.removeLock(FactMan, Cp); 884 if (!Cp.negative()) { 885 FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>( 886 !Cp, LK_Exclusive, UnlockLoc)); 887 } 888 } 889 }; 890 891 class ScopedLockableFactEntry : public FactEntry { 892 private: 893 SmallVector<const til::SExpr *, 4> UnderlyingMutexes; 894 895 public: 896 ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc, 897 const CapExprSet &Excl, const CapExprSet &Shrd) 898 : FactEntry(CE, LK_Exclusive, Loc, false) { 899 for (const auto &M : Excl) 900 UnderlyingMutexes.push_back(M.sexpr()); 901 for (const auto &M : Shrd) 902 UnderlyingMutexes.push_back(M.sexpr()); 903 } 904 905 void 906 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 907 SourceLocation JoinLoc, LockErrorKind LEK, 908 ThreadSafetyHandler &Handler) const override { 909 for (const auto *UnderlyingMutex : UnderlyingMutexes) { 910 if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) { 911 // If this scoped lock manages another mutex, and if the underlying 912 // mutex is still held, then warn about the underlying mutex. 
913 Handler.handleMutexHeldEndOfScope( 914 "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK); 915 } 916 } 917 } 918 919 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry, 920 ThreadSafetyHandler &Handler, 921 StringRef DiagKind) const override { 922 for (const auto *UnderlyingMutex : UnderlyingMutexes) { 923 CapabilityExpr UnderCp(UnderlyingMutex, false); 924 925 // We're relocking the underlying mutexes. Warn on double locking. 926 if (FSet.findLock(FactMan, UnderCp)) { 927 Handler.handleDoubleLock(DiagKind, UnderCp.toString(), entry.loc()); 928 } else { 929 FSet.removeLock(FactMan, !UnderCp); 930 FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>( 931 UnderCp, entry.kind(), entry.loc())); 932 } 933 } 934 } 935 936 void handleUnlock(FactSet &FSet, FactManager &FactMan, 937 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 938 bool FullyRemove, ThreadSafetyHandler &Handler, 939 StringRef DiagKind) const override { 940 assert(!Cp.negative() && "Managing object cannot be negative."); 941 for (const auto *UnderlyingMutex : UnderlyingMutexes) { 942 CapabilityExpr UnderCp(UnderlyingMutex, false); 943 auto UnderEntry = llvm::make_unique<LockableFactEntry>( 944 !UnderCp, LK_Exclusive, UnlockLoc); 945 946 if (FullyRemove) { 947 // We're destroying the managing object. 948 // Remove the underlying mutex if it exists; but don't warn. 949 if (FSet.findLock(FactMan, UnderCp)) { 950 FSet.removeLock(FactMan, UnderCp); 951 FSet.addLock(FactMan, std::move(UnderEntry)); 952 } 953 } else { 954 // We're releasing the underlying mutex, but not destroying the 955 // managing object. Warn on dual release. 956 if (!FSet.findLock(FactMan, UnderCp)) { 957 Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(), 958 UnlockLoc); 959 } 960 FSet.removeLock(FactMan, UnderCp); 961 FSet.addLock(FactMan, std::move(UnderEntry)); 962 } 963 } 964 if (FullyRemove) 965 FSet.removeLock(FactMan, Cp); 966 } 967 }; 968 969 /// Class which implements the core thread safety analysis routines. 
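///
/// The analyzer walks the CFG of a function in topological order, computing
/// the FactSet (the set of held capabilities) at the entry and exit of each
/// block, and reports diagnostics through the ThreadSafetyHandler when
/// locksets disagree at join points or when annotated requirements are not
/// satisfied.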
970 class ThreadSafetyAnalyzer { 971 friend class BuildLockset; 972 friend class threadSafety::BeforeSet; 973 974 llvm::BumpPtrAllocator Bpa; 975 threadSafety::til::MemRegionRef Arena; 976 threadSafety::SExprBuilder SxBuilder; 977 978 ThreadSafetyHandler &Handler; 979 const CXXMethodDecl *CurrentMethod; 980 LocalVariableMap LocalVarMap; 981 FactManager FactMan; 982 std::vector<CFGBlockInfo> BlockInfo; 983 984 BeforeSet *GlobalBeforeSet; 985 986 public: 987 ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset) 988 : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {} 989 990 bool inCurrentScope(const CapabilityExpr &CapE); 991 992 void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry, 993 StringRef DiagKind, bool ReqAttr = false); 994 void removeLock(FactSet &FSet, const CapabilityExpr &CapE, 995 SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind, 996 StringRef DiagKind); 997 998 template <typename AttrType> 999 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp, 1000 const NamedDecl *D, VarDecl *SelfDecl = nullptr); 1001 1002 template <class AttrType> 1003 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp, 1004 const NamedDecl *D, 1005 const CFGBlock *PredBlock, const CFGBlock *CurrBlock, 1006 Expr *BrE, bool Neg); 1007 1008 const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C, 1009 bool &Negate); 1010 1011 void getEdgeLockset(FactSet &Result, const FactSet &ExitSet, 1012 const CFGBlock* PredBlock, 1013 const CFGBlock *CurrBlock); 1014 1015 void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2, 1016 SourceLocation JoinLoc, 1017 LockErrorKind LEK1, LockErrorKind LEK2, 1018 bool Modify=true); 1019 1020 void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2, 1021 SourceLocation JoinLoc, LockErrorKind LEK1, 1022 bool Modify=true) { 1023 intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify); 1024 } 1025 1026 void runAnalysis(AnalysisDeclContext &AC); 1027 }; 1028 1029 } // namespace 1030 1031 /// Process acquired_before and acquired_after attributes on Vd. 1032 BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd, 1033 ThreadSafetyAnalyzer& Analyzer) { 1034 // Create a new entry for Vd. 1035 BeforeInfo *Info = nullptr; 1036 { 1037 // Keep InfoPtr in its own scope in case BMap is modified later and the 1038 // reference becomes invalid. 1039 std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd]; 1040 if (!InfoPtr) 1041 InfoPtr.reset(new BeforeInfo()); 1042 Info = InfoPtr.get(); 1043 } 1044 1045 for (const auto *At : Vd->attrs()) { 1046 switch (At->getKind()) { 1047 case attr::AcquiredBefore: { 1048 const auto *A = cast<AcquiredBeforeAttr>(At); 1049 1050 // Read exprs from the attribute, and add them to BeforeVect. 1051 for (const auto *Arg : A->args()) { 1052 CapabilityExpr Cp = 1053 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr); 1054 if (const ValueDecl *Cpvd = Cp.valueDecl()) { 1055 Info->Vect.push_back(Cpvd); 1056 const auto It = BMap.find(Cpvd); 1057 if (It == BMap.end()) 1058 insertAttrExprs(Cpvd, Analyzer); 1059 } 1060 } 1061 break; 1062 } 1063 case attr::AcquiredAfter: { 1064 const auto *A = cast<AcquiredAfterAttr>(At); 1065 1066 // Read exprs from the attribute, and add them to BeforeVect. 
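      // (For example, using the usual annotation macros, given
      // "Mutex b ACQUIRED_AFTER(a);" the loop below adds b to a's
      // before-vector, recording the same ordering fact that
      // "Mutex a ACQUIRED_BEFORE(b);" would.)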
1067 for (const auto *Arg : A->args()) { 1068 CapabilityExpr Cp = 1069 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr); 1070 if (const ValueDecl *ArgVd = Cp.valueDecl()) { 1071 // Get entry for mutex listed in attribute 1072 BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer); 1073 ArgInfo->Vect.push_back(Vd); 1074 } 1075 } 1076 break; 1077 } 1078 default: 1079 break; 1080 } 1081 } 1082 1083 return Info; 1084 } 1085 1086 BeforeSet::BeforeInfo * 1087 BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd, 1088 ThreadSafetyAnalyzer &Analyzer) { 1089 auto It = BMap.find(Vd); 1090 BeforeInfo *Info = nullptr; 1091 if (It == BMap.end()) 1092 Info = insertAttrExprs(Vd, Analyzer); 1093 else 1094 Info = It->second.get(); 1095 assert(Info && "BMap contained nullptr?"); 1096 return Info; 1097 } 1098 1099 /// Return true if any mutexes in FSet are in the acquired_before set of Vd. 1100 void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd, 1101 const FactSet& FSet, 1102 ThreadSafetyAnalyzer& Analyzer, 1103 SourceLocation Loc, StringRef CapKind) { 1104 SmallVector<BeforeInfo*, 8> InfoVect; 1105 1106 // Do a depth-first traversal of Vd. 1107 // Return true if there are cycles. 1108 std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) { 1109 if (!Vd) 1110 return false; 1111 1112 BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer); 1113 1114 if (Info->Visited == 1) 1115 return true; 1116 1117 if (Info->Visited == 2) 1118 return false; 1119 1120 if (Info->Vect.empty()) 1121 return false; 1122 1123 InfoVect.push_back(Info); 1124 Info->Visited = 1; 1125 for (const auto *Vdb : Info->Vect) { 1126 // Exclude mutexes in our immediate before set. 1127 if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) { 1128 StringRef L1 = StartVd->getName(); 1129 StringRef L2 = Vdb->getName(); 1130 Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc); 1131 } 1132 // Transitively search other before sets, and warn on cycles. 1133 if (traverse(Vdb)) { 1134 if (CycMap.find(Vd) == CycMap.end()) { 1135 CycMap.insert(std::make_pair(Vd, true)); 1136 StringRef L1 = Vd->getName(); 1137 Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation()); 1138 } 1139 } 1140 } 1141 Info->Visited = 2; 1142 return false; 1143 }; 1144 1145 traverse(StartVd); 1146 1147 for (auto *Info : InfoVect) 1148 Info->Visited = 0; 1149 } 1150 1151 /// Gets the value decl pointer from DeclRefExprs or MemberExprs. 1152 static const ValueDecl *getValueDecl(const Expr *Exp) { 1153 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp)) 1154 return getValueDecl(CE->getSubExpr()); 1155 1156 if (const auto *DR = dyn_cast<DeclRefExpr>(Exp)) 1157 return DR->getDecl(); 1158 1159 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) 1160 return ME->getMemberDecl(); 1161 1162 return nullptr; 1163 } 1164 1165 namespace { 1166 1167 template <typename Ty> 1168 class has_arg_iterator_range { 1169 using yes = char[1]; 1170 using no = char[2]; 1171 1172 template <typename Inner> 1173 static yes& test(Inner *I, decltype(I->args()) * = nullptr); 1174 1175 template <typename> 1176 static no& test(...); 1177 1178 public: 1179 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes); 1180 }; 1181 1182 } // namespace 1183 1184 static StringRef ClassifyDiagnostic(const CapabilityAttr *A) { 1185 return A->getName(); 1186 } 1187 1188 static StringRef ClassifyDiagnostic(QualType VDT) { 1189 // We need to look at the declaration of the type of the value to determine 1190 // which it is. 
The type should either be a record or a typedef, or a pointer 1191 // or reference thereof. 1192 if (const auto *RT = VDT->getAs<RecordType>()) { 1193 if (const auto *RD = RT->getDecl()) 1194 if (const auto *CA = RD->getAttr<CapabilityAttr>()) 1195 return ClassifyDiagnostic(CA); 1196 } else if (const auto *TT = VDT->getAs<TypedefType>()) { 1197 if (const auto *TD = TT->getDecl()) 1198 if (const auto *CA = TD->getAttr<CapabilityAttr>()) 1199 return ClassifyDiagnostic(CA); 1200 } else if (VDT->isPointerType() || VDT->isReferenceType()) 1201 return ClassifyDiagnostic(VDT->getPointeeType()); 1202 1203 return "mutex"; 1204 } 1205 1206 static StringRef ClassifyDiagnostic(const ValueDecl *VD) { 1207 assert(VD && "No ValueDecl passed"); 1208 1209 // The ValueDecl is the declaration of a mutex or role (hopefully). 1210 return ClassifyDiagnostic(VD->getType()); 1211 } 1212 1213 template <typename AttrTy> 1214 static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value, 1215 StringRef>::type 1216 ClassifyDiagnostic(const AttrTy *A) { 1217 if (const ValueDecl *VD = getValueDecl(A->getArg())) 1218 return ClassifyDiagnostic(VD); 1219 return "mutex"; 1220 } 1221 1222 template <typename AttrTy> 1223 static typename std::enable_if<has_arg_iterator_range<AttrTy>::value, 1224 StringRef>::type 1225 ClassifyDiagnostic(const AttrTy *A) { 1226 for (const auto *Arg : A->args()) { 1227 if (const ValueDecl *VD = getValueDecl(Arg)) 1228 return ClassifyDiagnostic(VD); 1229 } 1230 return "mutex"; 1231 } 1232 1233 bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) { 1234 if (!CurrentMethod) 1235 return false; 1236 if (const auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) { 1237 const auto *VD = P->clangDecl(); 1238 if (VD) 1239 return VD->getDeclContext() == CurrentMethod->getDeclContext(); 1240 } 1241 return false; 1242 } 1243 1244 /// Add a new lock to the lockset, warning if the lock is already there. 1245 /// \param ReqAttr -- true if this is part of an initial Requires attribute. 1246 void ThreadSafetyAnalyzer::addLock(FactSet &FSet, 1247 std::unique_ptr<FactEntry> Entry, 1248 StringRef DiagKind, bool ReqAttr) { 1249 if (Entry->shouldIgnore()) 1250 return; 1251 1252 if (!ReqAttr && !Entry->negative()) { 1253 // look for the negative capability, and remove it from the fact set. 1254 CapabilityExpr NegC = !*Entry; 1255 const FactEntry *Nen = FSet.findLock(FactMan, NegC); 1256 if (Nen) { 1257 FSet.removeLock(FactMan, NegC); 1258 } 1259 else { 1260 if (inCurrentScope(*Entry) && !Entry->asserted()) 1261 Handler.handleNegativeNotHeld(DiagKind, Entry->toString(), 1262 NegC.toString(), Entry->loc()); 1263 } 1264 } 1265 1266 // Check before/after constraints 1267 if (Handler.issueBetaWarnings() && 1268 !Entry->asserted() && !Entry->declared()) { 1269 GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this, 1270 Entry->loc(), DiagKind); 1271 } 1272 1273 // FIXME: Don't always warn when we have support for reentrant locks. 1274 if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) { 1275 if (!Entry->asserted()) 1276 Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind); 1277 } else { 1278 FSet.addLock(FactMan, std::move(Entry)); 1279 } 1280 } 1281 1282 /// Remove a lock from the lockset, warning if the lock is not there. 
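/// \param FullyRemove True if the capability should be fully removed, e.g.
///   when called for a destructor; a scoped capability is then removed
///   together with its underlying mutexes without warning.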
1283 /// \param UnlockLoc The source location of the unlock (only used in error msg) 1284 void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp, 1285 SourceLocation UnlockLoc, 1286 bool FullyRemove, LockKind ReceivedKind, 1287 StringRef DiagKind) { 1288 if (Cp.shouldIgnore()) 1289 return; 1290 1291 const FactEntry *LDat = FSet.findLock(FactMan, Cp); 1292 if (!LDat) { 1293 Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc); 1294 return; 1295 } 1296 1297 // Generic lock removal doesn't care about lock kind mismatches, but 1298 // otherwise diagnose when the lock kinds are mismatched. 1299 if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) { 1300 Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(), 1301 LDat->kind(), ReceivedKind, UnlockLoc); 1302 } 1303 1304 LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler, 1305 DiagKind); 1306 } 1307 1308 /// Extract the list of mutexIDs from the attribute on an expression, 1309 /// and push them onto Mtxs, discarding any duplicates. 1310 template <typename AttrType> 1311 void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, 1312 const Expr *Exp, const NamedDecl *D, 1313 VarDecl *SelfDecl) { 1314 if (Attr->args_size() == 0) { 1315 // The mutex held is the "this" object. 1316 CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl); 1317 if (Cp.isInvalid()) { 1318 warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr)); 1319 return; 1320 } 1321 //else 1322 if (!Cp.shouldIgnore()) 1323 Mtxs.push_back_nodup(Cp); 1324 return; 1325 } 1326 1327 for (const auto *Arg : Attr->args()) { 1328 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl); 1329 if (Cp.isInvalid()) { 1330 warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr)); 1331 continue; 1332 } 1333 //else 1334 if (!Cp.shouldIgnore()) 1335 Mtxs.push_back_nodup(Cp); 1336 } 1337 } 1338 1339 /// Extract the list of mutexIDs from a trylock attribute. If the 1340 /// trylock applies to the given edge, then push them onto Mtxs, discarding 1341 /// any duplicates. 1342 template <class AttrType> 1343 void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, 1344 const Expr *Exp, const NamedDecl *D, 1345 const CFGBlock *PredBlock, 1346 const CFGBlock *CurrBlock, 1347 Expr *BrE, bool Neg) { 1348 // Find out which branch has the lock 1349 bool branch = false; 1350 if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) 1351 branch = BLE->getValue(); 1352 else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) 1353 branch = ILE->getValue().getBoolValue(); 1354 1355 int branchnum = branch ? 
                              0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// The Negate argument should be initialized to false; it will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
    return CallExp;
  }
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
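      // (For example, in "if (a && mu.TryLock())" the test of "a" gets its
      // own CFG block, so on this edge only the RHS can be the trylock call.)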
1433 return getTrylockCallExpr(BOP->getRHS(), C, Negate); 1434 } 1435 if (BOP->getOpcode() == BO_LOr) 1436 return getTrylockCallExpr(BOP->getRHS(), C, Negate); 1437 return nullptr; 1438 } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) { 1439 bool TCond, FCond; 1440 if (getStaticBooleanValue(COP->getTrueExpr(), TCond) && 1441 getStaticBooleanValue(COP->getFalseExpr(), FCond)) { 1442 if (TCond && !FCond) 1443 return getTrylockCallExpr(COP->getCond(), C, Negate); 1444 if (!TCond && FCond) { 1445 Negate = !Negate; 1446 return getTrylockCallExpr(COP->getCond(), C, Negate); 1447 } 1448 } 1449 } 1450 return nullptr; 1451 } 1452 1453 /// Find the lockset that holds on the edge between PredBlock 1454 /// and CurrBlock. The edge set is the exit set of PredBlock (passed 1455 /// as the ExitSet parameter) plus any trylocks, which are conditionally held. 1456 void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result, 1457 const FactSet &ExitSet, 1458 const CFGBlock *PredBlock, 1459 const CFGBlock *CurrBlock) { 1460 Result = ExitSet; 1461 1462 const Stmt *Cond = PredBlock->getTerminatorCondition(); 1463 // We don't acquire try-locks on ?: branches, only when its result is used. 1464 if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminator())) 1465 return; 1466 1467 bool Negate = false; 1468 const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()]; 1469 const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext; 1470 StringRef CapDiagKind = "mutex"; 1471 1472 const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate); 1473 if (!Exp) 1474 return; 1475 1476 auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); 1477 if(!FunDecl || !FunDecl->hasAttrs()) 1478 return; 1479 1480 CapExprSet ExclusiveLocksToAdd; 1481 CapExprSet SharedLocksToAdd; 1482 1483 // If the condition is a call to a Trylock function, then grab the attributes 1484 for (const auto *Attr : FunDecl->attrs()) { 1485 switch (Attr->getKind()) { 1486 case attr::TryAcquireCapability: { 1487 auto *A = cast<TryAcquireCapabilityAttr>(Attr); 1488 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 1489 Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(), 1490 Negate); 1491 CapDiagKind = ClassifyDiagnostic(A); 1492 break; 1493 }; 1494 case attr::ExclusiveTrylockFunction: { 1495 const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr); 1496 getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl, 1497 PredBlock, CurrBlock, A->getSuccessValue(), Negate); 1498 CapDiagKind = ClassifyDiagnostic(A); 1499 break; 1500 } 1501 case attr::SharedTrylockFunction: { 1502 const auto *A = cast<SharedTrylockFunctionAttr>(Attr); 1503 getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl, 1504 PredBlock, CurrBlock, A->getSuccessValue(), Negate); 1505 CapDiagKind = ClassifyDiagnostic(A); 1506 break; 1507 } 1508 default: 1509 break; 1510 } 1511 } 1512 1513 // Add and remove locks. 1514 SourceLocation Loc = Exp->getExprLoc(); 1515 for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd) 1516 addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd, 1517 LK_Exclusive, Loc), 1518 CapDiagKind); 1519 for (const auto &SharedLockToAdd : SharedLocksToAdd) 1520 addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd, 1521 LK_Shared, Loc), 1522 CapDiagKind); 1523 } 1524 1525 namespace { 1526 1527 /// We use this class to visit different types of expressions in 1528 /// CFGBlocks, and build up the lockset. 
1529 /// An expression may cause us to add or remove locks from the lockset, or else 1530 /// output error messages related to missing locks. 1531 /// FIXME: In future, we may be able to not inherit from a visitor. 1532 class BuildLockset : public ConstStmtVisitor<BuildLockset> { 1533 friend class ThreadSafetyAnalyzer; 1534 1535 ThreadSafetyAnalyzer *Analyzer; 1536 FactSet FSet; 1537 LocalVariableMap::Context LVarCtx; 1538 unsigned CtxIndex; 1539 1540 // helper functions 1541 void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK, 1542 Expr *MutexExp, ProtectedOperationKind POK, 1543 StringRef DiagKind, SourceLocation Loc); 1544 void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp, 1545 StringRef DiagKind); 1546 1547 void checkAccess(const Expr *Exp, AccessKind AK, 1548 ProtectedOperationKind POK = POK_VarAccess); 1549 void checkPtAccess(const Expr *Exp, AccessKind AK, 1550 ProtectedOperationKind POK = POK_VarAccess); 1551 1552 void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr); 1553 void examineArguments(const FunctionDecl *FD, 1554 CallExpr::const_arg_iterator ArgBegin, 1555 CallExpr::const_arg_iterator ArgEnd, 1556 bool SkipFirstParam = false); 1557 1558 public: 1559 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info) 1560 : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet), 1561 LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {} 1562 1563 void VisitUnaryOperator(const UnaryOperator *UO); 1564 void VisitBinaryOperator(const BinaryOperator *BO); 1565 void VisitCastExpr(const CastExpr *CE); 1566 void VisitCallExpr(const CallExpr *Exp); 1567 void VisitCXXConstructExpr(const CXXConstructExpr *Exp); 1568 void VisitDeclStmt(const DeclStmt *S); 1569 }; 1570 1571 } // namespace 1572 1573 /// Warn if the LSet does not contain a lock sufficient to protect access 1574 /// of at least the passed in AccessKind. 1575 void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, 1576 AccessKind AK, Expr *MutexExp, 1577 ProtectedOperationKind POK, 1578 StringRef DiagKind, SourceLocation Loc) { 1579 LockKind LK = getLockKindFromAccessKind(AK); 1580 1581 CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp); 1582 if (Cp.isInvalid()) { 1583 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind); 1584 return; 1585 } else if (Cp.shouldIgnore()) { 1586 return; 1587 } 1588 1589 if (Cp.negative()) { 1590 // Negative capabilities act like locks excluded 1591 const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp); 1592 if (LDat) { 1593 Analyzer->Handler.handleFunExcludesLock( 1594 DiagKind, D->getNameAsString(), (!Cp).toString(), Loc); 1595 return; 1596 } 1597 1598 // If this does not refer to a negative capability in the same class, 1599 // then stop here. 1600 if (!Analyzer->inCurrentScope(Cp)) 1601 return; 1602 1603 // Otherwise the negative requirement must be propagated to the caller. 1604 LDat = FSet.findLock(Analyzer->FactMan, Cp); 1605 if (!LDat) { 1606 Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(), 1607 LK_Shared, Loc); 1608 } 1609 return; 1610 } 1611 1612 const FactEntry *LDat = FSet.findLockUniv(Analyzer->FactMan, Cp); 1613 bool NoError = true; 1614 if (!LDat) { 1615 // No exact match found. Look for a partial match. 1616 LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp); 1617 if (LDat) { 1618 // Warn that there's no precise match. 
1619 std::string PartMatchStr = LDat->toString(); 1620 StringRef PartMatchName(PartMatchStr); 1621 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(), 1622 LK, Loc, &PartMatchName); 1623 } else { 1624 // Warn that there's no match at all. 1625 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(), 1626 LK, Loc); 1627 } 1628 NoError = false; 1629 } 1630 // Make sure the mutex we found is the right kind. 1631 if (NoError && LDat && !LDat->isAtLeast(LK)) { 1632 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(), 1633 LK, Loc); 1634 } 1635 } 1636 1637 /// Warn if the LSet contains the given lock. 1638 void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, 1639 Expr *MutexExp, StringRef DiagKind) { 1640 CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp); 1641 if (Cp.isInvalid()) { 1642 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind); 1643 return; 1644 } else if (Cp.shouldIgnore()) { 1645 return; 1646 } 1647 1648 const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp); 1649 if (LDat) { 1650 Analyzer->Handler.handleFunExcludesLock( 1651 DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc()); 1652 } 1653 } 1654 1655 /// Checks guarded_by and pt_guarded_by attributes. 1656 /// Whenever we identify an access (read or write) to a DeclRefExpr that is 1657 /// marked with guarded_by, we must ensure the appropriate mutexes are held. 1658 /// Similarly, we check if the access is to an expression that dereferences 1659 /// a pointer marked with pt_guarded_by. 1660 void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK, 1661 ProtectedOperationKind POK) { 1662 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts(); 1663 1664 SourceLocation Loc = Exp->getExprLoc(); 1665 1666 // Local variables of reference type cannot be re-assigned; 1667 // map them to their initializer. 1668 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) { 1669 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl()); 1670 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) { 1671 if (const auto *E = VD->getInit()) { 1672 // Guard against self-initialization. e.g., int &i = i; 1673 if (E == Exp) 1674 break; 1675 Exp = E; 1676 continue; 1677 } 1678 } 1679 break; 1680 } 1681 1682 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) { 1683 // For dereferences 1684 if (UO->getOpcode() == UO_Deref) 1685 checkPtAccess(UO->getSubExpr(), AK, POK); 1686 return; 1687 } 1688 1689 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) { 1690 checkPtAccess(AE->getLHS(), AK, POK); 1691 return; 1692 } 1693 1694 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) { 1695 if (ME->isArrow()) 1696 checkPtAccess(ME->getBase(), AK, POK); 1697 else 1698 checkAccess(ME->getBase(), AK, POK); 1699 } 1700 1701 const ValueDecl *D = getValueDecl(Exp); 1702 if (!D || !D->hasAttrs()) 1703 return; 1704 1705 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) { 1706 Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc); 1707 } 1708 1709 for (const auto *I : D->specific_attrs<GuardedByAttr>()) 1710 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK, 1711 ClassifyDiagnostic(I), Loc); 1712 } 1713 1714 /// Checks pt_guarded_by and pt_guarded_var attributes. 1715 /// POK is the same operationKind that was passed to checkAccess. 
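///
/// For example (illustrative only, using the usual annotation macros):
///
///   Mutex mu;
///   int *p PT_GUARDED_BY(mu);
///   *p = 42;     // dereference: warns here unless mu is held
///   p = nullptr; // writing p itself is only checked if p is GUARDED_BY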

/// Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same operation kind that was passed to checkAccess.
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
                                 ProtectedOperationKind POK) {
  while (true) {
    if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then its elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY.
        checkAccess(CE->getSubExpr(), AK, POK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  // Pass by reference warnings are under a different flag.
  ProtectedOperationKind PtPOK = POK_VarDereference;
  if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
    Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
                                        Exp->getExprLoc());

  for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
                       ClassifyDiagnostic(I), Exp->getExprLoc());
}
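
// A small illustration of the array-to-pointer-decay case above (hypothetical
// declarations, macros as in the documentation):
//
//   Mutex Mu;
//   int Arr[10] GUARDED_BY(Mu);
//
//   void G(int I) {
//     Arr[I] = 0;  // The array decays to a pointer, but its elements are still
//   }              // guarded via GUARDED_BY, so this warns if Mu is not held.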

/// Process a function call, method call, constructor call,
/// or destructor call.  This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
///
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              VarDecl *VD) {
  SourceLocation Loc = Exp->getExprLoc();
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
  StringRef CapDiagKind = "mutex";

  // Figure out if we're constructing an object of a scoped lockable class.
  bool isScopedVar = false;
  if (VD) {
    if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) {
      const CXXRecordDecl* PD = CD->getParent();
      if (PD && PD->hasAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  for(const Attr *At : D->attrs()) {
    switch (At->getKind()) {
      // When we encounter a lock function, we need to add the lock to our
      // lockset.
      case attr::AcquireCapability: {
        const auto *A = cast<AcquireCapabilityAttr>(At);
        Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                            : ExclusiveLocksToAdd,
                              A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      // An assert will add a lock to the lockset, but will not generate
      // a warning if it is already there, and will not generate a warning
      // if it is not removed.
      case attr::AssertExclusiveLock: {
        const auto *A = cast<AssertExclusiveLockAttr>(At);

        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock, LK_Exclusive, Loc, false, true),
                            ClassifyDiagnostic(A));
        break;
      }
      case attr::AssertSharedLock: {
        const auto *A = cast<AssertSharedLockAttr>(At);

        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock, LK_Shared, Loc, false, true),
                            ClassifyDiagnostic(A));
        break;
      }

      case attr::AssertCapability: {
        const auto *A = cast<AssertCapabilityAttr>(At);
        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock,
                                A->isShared() ? LK_Shared : LK_Exclusive, Loc,
                                false, true),
                            ClassifyDiagnostic(A));
        break;
      }

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::ReleaseCapability: {
        const auto *A = cast<ReleaseCapabilityAttr>(At);
        if (A->isGeneric())
          Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
        else if (A->isShared())
          Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
        else
          Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);

        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }

      case attr::RequiresCapability: {
        const auto *A = cast<RequiresCapabilityAttr>(At);
        for (auto *Arg : A->args()) {
          warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
                             POK_FunctionCall, ClassifyDiagnostic(A),
                             Exp->getExprLoc());
          // use for adopting a lock
          if (isScopedVar) {
            Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs
                                                : ScopedExclusiveReqs,
                                  A, Exp, D, VD);
          }
        }
        break;
      }

      case attr::LocksExcluded: {
        const auto *A = cast<LocksExcludedAttr>(At);
        for (auto *Arg : A->args())
          warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
        break;
      }

      // Ignore attributes unrelated to thread-safety
      default:
        break;
    }
  }

  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);

  // Add locks.
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                M, LK_Exclusive, Loc, isScopedVar),
                      CapDiagKind);
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                M, LK_Shared, Loc, isScopedVar),
                      CapDiagKind);

  if (isScopedVar) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
    // FIXME: does this store a pointer to DRE?
    CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);

    std::copy(ScopedExclusiveReqs.begin(), ScopedExclusiveReqs.end(),
              std::back_inserter(ExclusiveLocksToAdd));
    std::copy(ScopedSharedReqs.begin(), ScopedSharedReqs.end(),
              std::back_inserter(SharedLocksToAdd));
    Analyzer->addLock(FSet,
                      llvm::make_unique<ScopedLockableFactEntry>(
                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
                      CapDiagKind);
  }
}
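
// An illustrative sketch of the calls handleCall deals with, written with the
// documentation's macros; Mutex and MutexLocker are hypothetical user classes,
// not types used by this file.
//
//   class CAPABILITY("mutex") Mutex {
//   public:
//     void Lock() ACQUIRE();
//     void Unlock() RELEASE();
//   };
//
//   class SCOPED_CAPABILITY MutexLocker {
//   public:
//     MutexLocker(Mutex *Mu) ACQUIRE(Mu);  // constructing acquires Mu
//     ~MutexLocker() RELEASE();            // destruction releases it
//   };
//
//   Mutex Mu;
//   int Data GUARDED_BY(Mu);
//
//   void F() {
//     Mu.Lock();               // AcquireCapability: Mu is added to the lockset.
//     Data = 1;                // OK while Mu is held.
//     Mu.Unlock();             // ReleaseCapability: Mu is removed again.
//     MutexLocker Guard(&Mu);  // Scoped lockable: the guard object becomes a
//     Data = 2;                // "dummy mutex" managing Mu until end of scope.
//   }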

/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case UO_PostDec:
    case UO_PostInc:
    case UO_PreDec:
    case UO_PreInc:
      checkAccess(UO->getSubExpr(), AK_Written);
      break;
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);

  checkAccess(BO->getLHS(), AK_Written);
}

/// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(const CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  checkAccess(CE->getSubExpr(), AK_Read);
}
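
// Roughly, the three visitors above classify accesses as follows (hypothetical
// guarded variable, macros as in the documentation):
//
//   Mutex Mu;
//   int X GUARDED_BY(Mu);
//
//   void H(int Y) {
//     X++;    // VisitUnaryOperator:  write access to X.
//     X = Y;  // VisitBinaryOperator: write access to X (Y is an unguarded local).
//     Y = X;  // VisitCastExpr:       lvalue-to-rvalue load, read access to X.
//   }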

void BuildLockset::examineArguments(const FunctionDecl *FD,
                                    CallExpr::const_arg_iterator ArgBegin,
                                    CallExpr::const_arg_iterator ArgEnd,
                                    bool SkipFirstParam) {
  // Currently we can't do anything if we don't know the function declaration.
  if (!FD)
    return;

  // NO_THREAD_SAFETY_ANALYSIS does double duty here.  Normally it
  // only turns off checking within the body of a function, but we also
  // use it to turn off checking in arguments to the function.  This
  // could result in some false negatives, but the alternative is to
  // create yet another attribute.
  if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  const ArrayRef<ParmVarDecl *> Params = FD->parameters();
  auto Param = Params.begin();
  if (SkipFirstParam)
    ++Param;

  // There can be default arguments, so we stop when one iterator is at end().
  for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
       ++Param, ++Arg) {
    QualType Qt = (*Param)->getType();
    if (Qt->isReferenceType())
      checkAccess(*Arg, AK_Read, POK_PassByRef);
  }
}
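
// Sketch of what examineArguments is checking (hypothetical declarations):
//
//   Mutex Mu;
//   int X GUARDED_BY(Mu);
//   void TakesRef(const int &V);
//   void TakesValue(int V);
//
//   void Caller() {
//     TakesRef(X);    // Reference parameter: checked here as a read with
//                     // POK_PassByRef (a separate warning group).
//     TakesValue(X);  // By value: the read is caught by VisitCastExpr instead.
//   }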

void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer
    const CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        if (MD->isConst())
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        else // FIXME -- should be AK_Written
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
      } else {
        if (MD->isConst())
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
        else // FIXME -- should be AK_Written
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }

    examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
  } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    auto OEop = OE->getOperator();
    switch (OEop) {
      case OO_Equal: {
        const Expr *Target = OE->getArg(0);
        const Expr *Source = OE->getArg(1);
        checkAccess(Target, AK_Written);
        checkAccess(Source, AK_Read);
        break;
      }
      case OO_Star:
      case OO_Arrow:
      case OO_Subscript:
        if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
          // Grrr.  operator* can be multiplication...
          checkPtAccess(OE->getArg(0), AK_Read);
        }
        LLVM_FALLTHROUGH;
      default: {
        // TODO: get rid of this, and rely on pass-by-ref instead.
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        // Check the remaining arguments. For method operators, the first
        // argument is the implicit self argument, and doesn't appear in the
        // FunctionDecl, but for non-methods it does.
        const FunctionDecl *FD = OE->getDirectCallee();
        examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
                         /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
        break;
      }
    }
  } else {
    examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
  }

  auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if(!D || !D->hasAttrs())
    return;
  handleCall(Exp, D);
}

void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
  const CXXConstructorDecl *D = Exp->getConstructor();
  if (D && D->isCopyConstructor()) {
    const Expr* Source = Exp->getArg(0);
    checkAccess(Source, AK_Read);
  } else {
    examineArguments(D, Exp->arg_begin(), Exp->arg_end());
  }
}

static CXXConstructorDecl *
findConstructorForByValueReturn(const CXXRecordDecl *RD) {
  // Prefer a move constructor over a copy constructor. If there's more than
  // one copy constructor or more than one move constructor, we arbitrarily
  // pick the first declared such constructor rather than trying to guess which
  // one is more appropriate.
  CXXConstructorDecl *CopyCtor = nullptr;
  for (auto *Ctor : RD->ctors()) {
    if (Ctor->isDeleted())
      continue;
    if (Ctor->isMoveConstructor())
      return Ctor;
    if (!CopyCtor && Ctor->isCopyConstructor())
      CopyCtor = Ctor;
  }
  return CopyCtor;
}

static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
                               SourceLocation Loc) {
  ASTContext &Ctx = CD->getASTContext();
  return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc,
                                  CD, true, Args, false, false, false, false,
                                  CXXConstructExpr::CK_Complete,
                                  SourceRange(Loc, Loc));
}

void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  for (auto *D : S->getDeclGroup()) {
    if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      Expr *E = VD->getInit();
      if (!E)
        continue;
      E = E->IgnoreParens();

      // handle constructors that involve temporaries
      if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
        E = EWC->getSubExpr();
      if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
        E = BTE->getSubExpr();

      if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
        const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(E, CtorD, VD);
      } else if (isa<CallExpr>(E) && E->isRValue()) {
        // If the object is initialized by a function call that returns a
        // scoped lockable by value, use the attributes on the copy or move
        // constructor to figure out what effect that should have on the
        // lockset.
        // FIXME: Is this really the best way to handle this situation?
        auto *RD = E->getType()->getAsCXXRecordDecl();
        if (!RD || !RD->hasAttr<ScopedLockableAttr>())
          continue;
        CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(buildFakeCtorCall(CtorD, {E}, E->getBeginLoc()), CtorD, VD);
      }
    }
  }
}
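
// The by-value case above corresponds roughly to user code like this
// (hypothetical names, macros from the documentation):
//
//   class SCOPED_CAPABILITY MutexLock {
//   public:
//     MutexLock(Mutex *Mu) ACQUIRE(Mu);
//     ~MutexLock() RELEASE();
//   };
//
//   MutexLock LockAndReturn(Mutex *Mu) ACQUIRE(Mu);  // returns the guard by value
//
//   void F(Mutex *Mu) {
//     MutexLock Guard = LockAndReturn(Mu);  // When the copy/move is elided, the
//   }                                       // initializer is a plain CallExpr, so
//                                           // a fake construct expression is built
//                                           // to pick up the guard's attributes.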

/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and
/// C are the same. In the event of a difference, we use the intersection of
/// these two locksets at the start of D.
///
/// \param FSet1 The first lockset.
/// \param FSet2 The second lockset.
/// \param JoinLoc The location of the join point for error reporting
/// \param LEK1 The error message to report if a mutex is missing from FSet1
/// \param LEK2 The error message to report if a mutex is missing from FSet2
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
                                            const FactSet &FSet2,
                                            SourceLocation JoinLoc,
                                            LockErrorKind LEK1,
                                            LockErrorKind LEK2,
                                            bool Modify) {
  FactSet FSet1Orig = FSet1;

  // Find locks in FSet2 that conflict or are not in FSet1, and warn.
  for (const auto &Fact : FSet2) {
    const FactEntry *LDat1 = nullptr;
    const FactEntry *LDat2 = &FactMan[Fact];
    FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
    if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];

    if (LDat1) {
      if (LDat1->kind() != LDat2->kind()) {
        Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
                                         LDat2->loc(), LDat1->loc());
        if (Modify && LDat1->kind() != LK_Exclusive) {
          // Take the exclusive lock, which is the one in FSet2.
          *Iter1 = Fact;
        }
      }
      else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
        // The non-asserted lock in FSet2 is the one we want to track.
        *Iter1 = Fact;
      }
    } else {
      LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
                                           Handler);
    }
  }

  // Find locks in FSet1 that are not in FSet2, and remove them.
  for (const auto &Fact : FSet1Orig) {
    const FactEntry *LDat1 = &FactMan[Fact];
    const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);

    if (!LDat2) {
      LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
                                           Handler);
      if (Modify)
        FSet1.removeLock(FactMan, *LDat1);
    }
  }
}
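
// The join logic above is what catches conditionally held locks, e.g.
// (hypothetical user code):
//
//   Mutex Mu;
//   void F(bool B) {
//     if (B)
//       Mu.Lock();  // Mu is in the lockset only on the 'then' edge, so the
//   }               // join after the 'if' warns that Mu is held on some
//                   // but not all paths.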

// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
  if (B->hasNoReturnElement())
    return true;
  if (B->empty())
    return false;

  CFGElement Last = B->back();
  if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
    if (isa<CXXThrowExpr>(S->getStmt()))
      return true;
  }
  return false;
}

/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
  CurrentMethod = dyn_cast<CXXMethodDecl>(D);

  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code.  Constructors and destructors must assume unique access
  // to 'this', so checks on member variable accesses are disabled, but we
  // should still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.

  Handler.enterFunction(CurrentFunction);

  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  // Mark entry block as reachable
  BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;
    StringRef CapDiagKind = "mutex";

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc);
      Entry->setDeclared(true);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc);
      Entry->setDeclared(true);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
  }

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    SmallVector<CFGBlock *, 8> SpecialBlocks;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      // If the previous block ended in a 'continue' or 'break' statement, then
      // a difference in locksets is probably due to a bug in that block, rather
      // than in some other predecessor. In that case, keep the other
      // predecessor's lockset.
      if (const Stmt *Terminator = (*PI)->getTerminator()) {
        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
          SpecialBlocks.push_back(*PI);
          continue;
        }
      }

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         CurrBlockInfo->EntryLoc,
                         LEK_LockedSomePredecessors);
      }
    }

    // Skip rest of block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    // Process continue and break blocks. Assume that the lockset for the
    // resulting block is unaffected by any discrepancies in them.
    for (const auto *PrevBlock : SpecialBlocks) {
      unsigned PrevBlockID = PrevBlock->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Determine whether this edge is a loop terminator for diagnostic
        // purposes. FIXME: A 'break' statement might be a loop terminator, but
        // it might also be part of a switch. Also, a subsequent destructor
        // might add to the lockset, in which case the real issue might be a
        // double lock on the other path.
        const Stmt *Terminator = PrevBlock->getTerminator();
        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);

        FactSet PrevLockset;
        getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
                       PrevBlock, CurrBlock);

        // Do not update EntrySet.
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         PrevBlockInfo->ExitLoc,
                         IsLoop ? LEK_LockedSomeLoopIterations
                                : LEK_LockedSomePredecessors,
                         false);
      }
    }

    BuildLockset LocksetBuilder(this, *CurrBlockInfo);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI.castAs<CFGStmt>();
          LocksetBuilder.Visit(CS.getStmt());
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
          const auto *DD = AD.getDestructorDecl(AC.getASTContext());
          if (!DD->hasAttrs())
            break;

          // Create a dummy expression.
          auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
          DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
                          VK_LValue, AD.getTriggerStmt()->getEndLoc());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock), we need to check that the lockset at the end of CurrBlock
    // is equal to the one held at the beginning of FirstLoopBlock, which we can
    // look up in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
                       PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations,
                       false);
    }
  }

  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];

  // Skip the final check if the exit block is unreachable.
  if (!Final->Reachable)
    return;

  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedExitSet = Initial->EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION.  The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                         Lock, LK_Exclusive, D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                         Lock, LK_Shared, D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedExitSet.removeLock(FactMan, Lock);

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedExitSet, Final->ExitSet,
                   Final->ExitLoc,
                   LEK_LockedAtEndOfFunction,
                   LEK_NotLockedAtEndOfFunction,
                   false);

  Handler.leaveFunction(CurrentFunction);
}
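
// Roughly, two of the diagnostics produced above in terms of user code
// (hypothetical example):
//
//   Mutex Mu;
//   void F(int N) {
//     for (int I = 0; I < N; ++I) {
//       Mu.Lock();   // LEK_LockedSomeLoopIterations: Mu is held at the back
//     }              // edge but not at the start of the loop.
//     Mu.Lock();     // LEK_LockedAtEndOfFunction: Mu is still held when F
//   }                // returns, and F has no RELEASE/ACQUIRE annotation.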

/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                                           ThreadSafetyHandler &Handler,
                                           BeforeSet **BSet) {
  if (!*BSet)
    *BSet = new BeforeSet;
  ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
  Analyzer.runAnalysis(AC);
}

void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }

/// Helper function that returns a LockKind required for the given level
/// of access.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read :
      return LK_Shared;
    case AK_Written :
      return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}