1 //===- ThreadSafety.cpp ---------------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // A intra-procedural analysis for thread safety (e.g. deadlocks and race 10 // conditions), based off of an annotation system. 11 // 12 // See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html 13 // for more information. 14 // 15 //===----------------------------------------------------------------------===// 16 17 #include "clang/Analysis/Analyses/ThreadSafety.h" 18 #include "clang/AST/Attr.h" 19 #include "clang/AST/Decl.h" 20 #include "clang/AST/DeclCXX.h" 21 #include "clang/AST/DeclGroup.h" 22 #include "clang/AST/Expr.h" 23 #include "clang/AST/ExprCXX.h" 24 #include "clang/AST/OperationKinds.h" 25 #include "clang/AST/Stmt.h" 26 #include "clang/AST/StmtVisitor.h" 27 #include "clang/AST/Type.h" 28 #include "clang/Analysis/Analyses/PostOrderCFGView.h" 29 #include "clang/Analysis/Analyses/ThreadSafetyCommon.h" 30 #include "clang/Analysis/Analyses/ThreadSafetyTIL.h" 31 #include "clang/Analysis/Analyses/ThreadSafetyTraverse.h" 32 #include "clang/Analysis/Analyses/ThreadSafetyUtil.h" 33 #include "clang/Analysis/AnalysisDeclContext.h" 34 #include "clang/Analysis/CFG.h" 35 #include "clang/Basic/Builtins.h" 36 #include "clang/Basic/LLVM.h" 37 #include "clang/Basic/OperatorKinds.h" 38 #include "clang/Basic/SourceLocation.h" 39 #include "clang/Basic/Specifiers.h" 40 #include "llvm/ADT/ArrayRef.h" 41 #include "llvm/ADT/DenseMap.h" 42 #include "llvm/ADT/ImmutableMap.h" 43 #include "llvm/ADT/Optional.h" 44 #include "llvm/ADT/PointerIntPair.h" 45 #include "llvm/ADT/STLExtras.h" 46 #include "llvm/ADT/SmallVector.h" 47 #include "llvm/ADT/StringRef.h" 48 #include "llvm/Support/Allocator.h" 49 #include "llvm/Support/Casting.h" 50 #include "llvm/Support/ErrorHandling.h" 51 #include "llvm/Support/raw_ostream.h" 52 #include <algorithm> 53 #include <cassert> 54 #include <functional> 55 #include <iterator> 56 #include <memory> 57 #include <string> 58 #include <type_traits> 59 #include <utility> 60 #include <vector> 61 62 using namespace clang; 63 using namespace threadSafety; 64 65 // Key method definition 66 ThreadSafetyHandler::~ThreadSafetyHandler() = default; 67 68 /// Issue a warning about an invalid lock expression 69 static void warnInvalidLock(ThreadSafetyHandler &Handler, 70 const Expr *MutexExp, const NamedDecl *D, 71 const Expr *DeclExp, StringRef Kind) { 72 SourceLocation Loc; 73 if (DeclExp) 74 Loc = DeclExp->getExprLoc(); 75 76 // FIXME: add a note about the attribute location in MutexExp or D 77 if (Loc.isValid()) 78 Handler.handleInvalidLockExp(Kind, Loc); 79 } 80 81 namespace { 82 83 /// A set of CapabilityExpr objects, which are compiled from thread safety 84 /// attributes on a function. 85 class CapExprSet : public SmallVector<CapabilityExpr, 4> { 86 public: 87 /// Push M onto list, but discard duplicates. 88 void push_back_nodup(const CapabilityExpr &CapE) { 89 if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) { 90 return CapE.equals(CapE2); 91 })) 92 push_back(CapE); 93 } 94 }; 95 96 class FactManager; 97 class FactSet; 98 99 /// This is a helper class that stores a fact that is known at a 100 /// particular point in program execution. Currently, a fact is a capability, 101 /// along with additional information, such as where it was acquired, whether 102 /// it is exclusive or shared, etc. 103 /// 104 /// FIXME: this analysis does not currently support re-entrant locking. 105 class FactEntry : public CapabilityExpr { 106 public: 107 /// Where a fact comes from. 108 enum SourceKind { 109 Acquired, ///< The fact has been directly acquired. 110 Asserted, ///< The fact has been asserted to be held. 111 Declared, ///< The fact is assumed to be held by callers. 112 Managed, ///< The fact has been acquired through a scoped capability. 113 }; 114 115 private: 116 /// Exclusive or shared. 117 LockKind LKind : 8; 118 119 // How it was acquired. 120 SourceKind Source : 8; 121 122 /// Where it was acquired. 123 SourceLocation AcquireLoc; 124 125 public: 126 FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc, 127 SourceKind Src) 128 : CapabilityExpr(CE), LKind(LK), Source(Src), AcquireLoc(Loc) {} 129 virtual ~FactEntry() = default; 130 131 LockKind kind() const { return LKind; } 132 SourceLocation loc() const { return AcquireLoc; } 133 134 bool asserted() const { return Source == Asserted; } 135 bool declared() const { return Source == Declared; } 136 bool managed() const { return Source == Managed; } 137 138 virtual void 139 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 140 SourceLocation JoinLoc, LockErrorKind LEK, 141 ThreadSafetyHandler &Handler) const = 0; 142 virtual void handleLock(FactSet &FSet, FactManager &FactMan, 143 const FactEntry &entry, ThreadSafetyHandler &Handler, 144 StringRef DiagKind) const = 0; 145 virtual void handleUnlock(FactSet &FSet, FactManager &FactMan, 146 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 147 bool FullyRemove, ThreadSafetyHandler &Handler, 148 StringRef DiagKind) const = 0; 149 150 // Return true if LKind >= LK, where exclusive > shared 151 bool isAtLeast(LockKind LK) const { 152 return (LKind == LK_Exclusive) || (LK == LK_Shared); 153 } 154 }; 155 156 using FactID = unsigned short; 157 158 /// FactManager manages the memory for all facts that are created during 159 /// the analysis of a single routine. 160 class FactManager { 161 private: 162 std::vector<std::unique_ptr<const FactEntry>> Facts; 163 164 public: 165 FactID newFact(std::unique_ptr<FactEntry> Entry) { 166 Facts.push_back(std::move(Entry)); 167 return static_cast<unsigned short>(Facts.size() - 1); 168 } 169 170 const FactEntry &operator[](FactID F) const { return *Facts[F]; } 171 }; 172 173 /// A FactSet is the set of facts that are known to be true at a 174 /// particular program point. FactSets must be small, because they are 175 /// frequently copied, and are thus implemented as a set of indices into a 176 /// table maintained by a FactManager. A typical FactSet only holds 1 or 2 177 /// locks, so we can get away with doing a linear search for lookup. Note 178 /// that a hashtable or map is inappropriate in this case, because lookups 179 /// may involve partial pattern matches, rather than exact matches. 180 class FactSet { 181 private: 182 using FactVec = SmallVector<FactID, 4>; 183 184 FactVec FactIDs; 185 186 public: 187 using iterator = FactVec::iterator; 188 using const_iterator = FactVec::const_iterator; 189 190 iterator begin() { return FactIDs.begin(); } 191 const_iterator begin() const { return FactIDs.begin(); } 192 193 iterator end() { return FactIDs.end(); } 194 const_iterator end() const { return FactIDs.end(); } 195 196 bool isEmpty() const { return FactIDs.size() == 0; } 197 198 // Return true if the set contains only negative facts 199 bool isEmpty(FactManager &FactMan) const { 200 for (const auto FID : *this) { 201 if (!FactMan[FID].negative()) 202 return false; 203 } 204 return true; 205 } 206 207 void addLockByID(FactID ID) { FactIDs.push_back(ID); } 208 209 FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) { 210 FactID F = FM.newFact(std::move(Entry)); 211 FactIDs.push_back(F); 212 return F; 213 } 214 215 bool removeLock(FactManager& FM, const CapabilityExpr &CapE) { 216 unsigned n = FactIDs.size(); 217 if (n == 0) 218 return false; 219 220 for (unsigned i = 0; i < n-1; ++i) { 221 if (FM[FactIDs[i]].matches(CapE)) { 222 FactIDs[i] = FactIDs[n-1]; 223 FactIDs.pop_back(); 224 return true; 225 } 226 } 227 if (FM[FactIDs[n-1]].matches(CapE)) { 228 FactIDs.pop_back(); 229 return true; 230 } 231 return false; 232 } 233 234 iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) { 235 return std::find_if(begin(), end(), [&](FactID ID) { 236 return FM[ID].matches(CapE); 237 }); 238 } 239 240 const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const { 241 auto I = std::find_if(begin(), end(), [&](FactID ID) { 242 return FM[ID].matches(CapE); 243 }); 244 return I != end() ? &FM[*I] : nullptr; 245 } 246 247 const FactEntry *findLockUniv(FactManager &FM, 248 const CapabilityExpr &CapE) const { 249 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 250 return FM[ID].matchesUniv(CapE); 251 }); 252 return I != end() ? &FM[*I] : nullptr; 253 } 254 255 const FactEntry *findPartialMatch(FactManager &FM, 256 const CapabilityExpr &CapE) const { 257 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 258 return FM[ID].partiallyMatches(CapE); 259 }); 260 return I != end() ? &FM[*I] : nullptr; 261 } 262 263 bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const { 264 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 265 return FM[ID].valueDecl() == Vd; 266 }); 267 return I != end(); 268 } 269 }; 270 271 class ThreadSafetyAnalyzer; 272 273 } // namespace 274 275 namespace clang { 276 namespace threadSafety { 277 278 class BeforeSet { 279 private: 280 using BeforeVect = SmallVector<const ValueDecl *, 4>; 281 282 struct BeforeInfo { 283 BeforeVect Vect; 284 int Visited = 0; 285 286 BeforeInfo() = default; 287 BeforeInfo(BeforeInfo &&) = default; 288 }; 289 290 using BeforeMap = 291 llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>; 292 using CycleMap = llvm::DenseMap<const ValueDecl *, bool>; 293 294 public: 295 BeforeSet() = default; 296 297 BeforeInfo* insertAttrExprs(const ValueDecl* Vd, 298 ThreadSafetyAnalyzer& Analyzer); 299 300 BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd, 301 ThreadSafetyAnalyzer &Analyzer); 302 303 void checkBeforeAfter(const ValueDecl* Vd, 304 const FactSet& FSet, 305 ThreadSafetyAnalyzer& Analyzer, 306 SourceLocation Loc, StringRef CapKind); 307 308 private: 309 BeforeMap BMap; 310 CycleMap CycMap; 311 }; 312 313 } // namespace threadSafety 314 } // namespace clang 315 316 namespace { 317 318 class LocalVariableMap; 319 320 using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>; 321 322 /// A side (entry or exit) of a CFG node. 323 enum CFGBlockSide { CBS_Entry, CBS_Exit }; 324 325 /// CFGBlockInfo is a struct which contains all the information that is 326 /// maintained for each block in the CFG. See LocalVariableMap for more 327 /// information about the contexts. 328 struct CFGBlockInfo { 329 // Lockset held at entry to block 330 FactSet EntrySet; 331 332 // Lockset held at exit from block 333 FactSet ExitSet; 334 335 // Context held at entry to block 336 LocalVarContext EntryContext; 337 338 // Context held at exit from block 339 LocalVarContext ExitContext; 340 341 // Location of first statement in block 342 SourceLocation EntryLoc; 343 344 // Location of last statement in block. 345 SourceLocation ExitLoc; 346 347 // Used to replay contexts later 348 unsigned EntryIndex; 349 350 // Is this block reachable? 351 bool Reachable = false; 352 353 const FactSet &getSet(CFGBlockSide Side) const { 354 return Side == CBS_Entry ? EntrySet : ExitSet; 355 } 356 357 SourceLocation getLocation(CFGBlockSide Side) const { 358 return Side == CBS_Entry ? EntryLoc : ExitLoc; 359 } 360 361 private: 362 CFGBlockInfo(LocalVarContext EmptyCtx) 363 : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {} 364 365 public: 366 static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M); 367 }; 368 369 // A LocalVariableMap maintains a map from local variables to their currently 370 // valid definitions. It provides SSA-like functionality when traversing the 371 // CFG. Like SSA, each definition or assignment to a variable is assigned a 372 // unique name (an integer), which acts as the SSA name for that definition. 373 // The total set of names is shared among all CFG basic blocks. 374 // Unlike SSA, we do not rewrite expressions to replace local variables declrefs 375 // with their SSA-names. Instead, we compute a Context for each point in the 376 // code, which maps local variables to the appropriate SSA-name. This map 377 // changes with each assignment. 378 // 379 // The map is computed in a single pass over the CFG. Subsequent analyses can 380 // then query the map to find the appropriate Context for a statement, and use 381 // that Context to look up the definitions of variables. 382 class LocalVariableMap { 383 public: 384 using Context = LocalVarContext; 385 386 /// A VarDefinition consists of an expression, representing the value of the 387 /// variable, along with the context in which that expression should be 388 /// interpreted. A reference VarDefinition does not itself contain this 389 /// information, but instead contains a pointer to a previous VarDefinition. 390 struct VarDefinition { 391 public: 392 friend class LocalVariableMap; 393 394 // The original declaration for this variable. 395 const NamedDecl *Dec; 396 397 // The expression for this variable, OR 398 const Expr *Exp = nullptr; 399 400 // Reference to another VarDefinition 401 unsigned Ref = 0; 402 403 // The map with which Exp should be interpreted. 404 Context Ctx; 405 406 bool isReference() { return !Exp; } 407 408 private: 409 // Create ordinary variable definition 410 VarDefinition(const NamedDecl *D, const Expr *E, Context C) 411 : Dec(D), Exp(E), Ctx(C) {} 412 413 // Create reference to previous definition 414 VarDefinition(const NamedDecl *D, unsigned R, Context C) 415 : Dec(D), Ref(R), Ctx(C) {} 416 }; 417 418 private: 419 Context::Factory ContextFactory; 420 std::vector<VarDefinition> VarDefinitions; 421 std::vector<unsigned> CtxIndices; 422 std::vector<std::pair<const Stmt *, Context>> SavedContexts; 423 424 public: 425 LocalVariableMap() { 426 // index 0 is a placeholder for undefined variables (aka phi-nodes). 427 VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext())); 428 } 429 430 /// Look up a definition, within the given context. 431 const VarDefinition* lookup(const NamedDecl *D, Context Ctx) { 432 const unsigned *i = Ctx.lookup(D); 433 if (!i) 434 return nullptr; 435 assert(*i < VarDefinitions.size()); 436 return &VarDefinitions[*i]; 437 } 438 439 /// Look up the definition for D within the given context. Returns 440 /// NULL if the expression is not statically known. If successful, also 441 /// modifies Ctx to hold the context of the return Expr. 442 const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) { 443 const unsigned *P = Ctx.lookup(D); 444 if (!P) 445 return nullptr; 446 447 unsigned i = *P; 448 while (i > 0) { 449 if (VarDefinitions[i].Exp) { 450 Ctx = VarDefinitions[i].Ctx; 451 return VarDefinitions[i].Exp; 452 } 453 i = VarDefinitions[i].Ref; 454 } 455 return nullptr; 456 } 457 458 Context getEmptyContext() { return ContextFactory.getEmptyMap(); } 459 460 /// Return the next context after processing S. This function is used by 461 /// clients of the class to get the appropriate context when traversing the 462 /// CFG. It must be called for every assignment or DeclStmt. 463 Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) { 464 if (SavedContexts[CtxIndex+1].first == S) { 465 CtxIndex++; 466 Context Result = SavedContexts[CtxIndex].second; 467 return Result; 468 } 469 return C; 470 } 471 472 void dumpVarDefinitionName(unsigned i) { 473 if (i == 0) { 474 llvm::errs() << "Undefined"; 475 return; 476 } 477 const NamedDecl *Dec = VarDefinitions[i].Dec; 478 if (!Dec) { 479 llvm::errs() << "<<NULL>>"; 480 return; 481 } 482 Dec->printName(llvm::errs()); 483 llvm::errs() << "." << i << " " << ((const void*) Dec); 484 } 485 486 /// Dumps an ASCII representation of the variable map to llvm::errs() 487 void dump() { 488 for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) { 489 const Expr *Exp = VarDefinitions[i].Exp; 490 unsigned Ref = VarDefinitions[i].Ref; 491 492 dumpVarDefinitionName(i); 493 llvm::errs() << " = "; 494 if (Exp) Exp->dump(); 495 else { 496 dumpVarDefinitionName(Ref); 497 llvm::errs() << "\n"; 498 } 499 } 500 } 501 502 /// Dumps an ASCII representation of a Context to llvm::errs() 503 void dumpContext(Context C) { 504 for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) { 505 const NamedDecl *D = I.getKey(); 506 D->printName(llvm::errs()); 507 const unsigned *i = C.lookup(D); 508 llvm::errs() << " -> "; 509 dumpVarDefinitionName(*i); 510 llvm::errs() << "\n"; 511 } 512 } 513 514 /// Builds the variable map. 515 void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph, 516 std::vector<CFGBlockInfo> &BlockInfo); 517 518 protected: 519 friend class VarMapBuilder; 520 521 // Get the current context index 522 unsigned getContextIndex() { return SavedContexts.size()-1; } 523 524 // Save the current context for later replay 525 void saveContext(const Stmt *S, Context C) { 526 SavedContexts.push_back(std::make_pair(S, C)); 527 } 528 529 // Adds a new definition to the given context, and returns a new context. 530 // This method should be called when declaring a new variable. 531 Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) { 532 assert(!Ctx.contains(D)); 533 unsigned newID = VarDefinitions.size(); 534 Context NewCtx = ContextFactory.add(Ctx, D, newID); 535 VarDefinitions.push_back(VarDefinition(D, Exp, Ctx)); 536 return NewCtx; 537 } 538 539 // Add a new reference to an existing definition. 540 Context addReference(const NamedDecl *D, unsigned i, Context Ctx) { 541 unsigned newID = VarDefinitions.size(); 542 Context NewCtx = ContextFactory.add(Ctx, D, newID); 543 VarDefinitions.push_back(VarDefinition(D, i, Ctx)); 544 return NewCtx; 545 } 546 547 // Updates a definition only if that definition is already in the map. 548 // This method should be called when assigning to an existing variable. 549 Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) { 550 if (Ctx.contains(D)) { 551 unsigned newID = VarDefinitions.size(); 552 Context NewCtx = ContextFactory.remove(Ctx, D); 553 NewCtx = ContextFactory.add(NewCtx, D, newID); 554 VarDefinitions.push_back(VarDefinition(D, Exp, Ctx)); 555 return NewCtx; 556 } 557 return Ctx; 558 } 559 560 // Removes a definition from the context, but keeps the variable name 561 // as a valid variable. The index 0 is a placeholder for cleared definitions. 562 Context clearDefinition(const NamedDecl *D, Context Ctx) { 563 Context NewCtx = Ctx; 564 if (NewCtx.contains(D)) { 565 NewCtx = ContextFactory.remove(NewCtx, D); 566 NewCtx = ContextFactory.add(NewCtx, D, 0); 567 } 568 return NewCtx; 569 } 570 571 // Remove a definition entirely frmo the context. 572 Context removeDefinition(const NamedDecl *D, Context Ctx) { 573 Context NewCtx = Ctx; 574 if (NewCtx.contains(D)) { 575 NewCtx = ContextFactory.remove(NewCtx, D); 576 } 577 return NewCtx; 578 } 579 580 Context intersectContexts(Context C1, Context C2); 581 Context createReferenceContext(Context C); 582 void intersectBackEdge(Context C1, Context C2); 583 }; 584 585 } // namespace 586 587 // This has to be defined after LocalVariableMap. 588 CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) { 589 return CFGBlockInfo(M.getEmptyContext()); 590 } 591 592 namespace { 593 594 /// Visitor which builds a LocalVariableMap 595 class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> { 596 public: 597 LocalVariableMap* VMap; 598 LocalVariableMap::Context Ctx; 599 600 VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C) 601 : VMap(VM), Ctx(C) {} 602 603 void VisitDeclStmt(const DeclStmt *S); 604 void VisitBinaryOperator(const BinaryOperator *BO); 605 }; 606 607 } // namespace 608 609 // Add new local variables to the variable map 610 void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) { 611 bool modifiedCtx = false; 612 const DeclGroupRef DGrp = S->getDeclGroup(); 613 for (const auto *D : DGrp) { 614 if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) { 615 const Expr *E = VD->getInit(); 616 617 // Add local variables with trivial type to the variable map 618 QualType T = VD->getType(); 619 if (T.isTrivialType(VD->getASTContext())) { 620 Ctx = VMap->addDefinition(VD, E, Ctx); 621 modifiedCtx = true; 622 } 623 } 624 } 625 if (modifiedCtx) 626 VMap->saveContext(S, Ctx); 627 } 628 629 // Update local variable definitions in variable map 630 void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) { 631 if (!BO->isAssignmentOp()) 632 return; 633 634 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts(); 635 636 // Update the variable map and current context. 637 if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) { 638 const ValueDecl *VDec = DRE->getDecl(); 639 if (Ctx.lookup(VDec)) { 640 if (BO->getOpcode() == BO_Assign) 641 Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx); 642 else 643 // FIXME -- handle compound assignment operators 644 Ctx = VMap->clearDefinition(VDec, Ctx); 645 VMap->saveContext(BO, Ctx); 646 } 647 } 648 } 649 650 // Computes the intersection of two contexts. The intersection is the 651 // set of variables which have the same definition in both contexts; 652 // variables with different definitions are discarded. 653 LocalVariableMap::Context 654 LocalVariableMap::intersectContexts(Context C1, Context C2) { 655 Context Result = C1; 656 for (const auto &P : C1) { 657 const NamedDecl *Dec = P.first; 658 const unsigned *i2 = C2.lookup(Dec); 659 if (!i2) // variable doesn't exist on second path 660 Result = removeDefinition(Dec, Result); 661 else if (*i2 != P.second) // variable exists, but has different definition 662 Result = clearDefinition(Dec, Result); 663 } 664 return Result; 665 } 666 667 // For every variable in C, create a new variable that refers to the 668 // definition in C. Return a new context that contains these new variables. 669 // (We use this for a naive implementation of SSA on loop back-edges.) 670 LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) { 671 Context Result = getEmptyContext(); 672 for (const auto &P : C) 673 Result = addReference(P.first, P.second, Result); 674 return Result; 675 } 676 677 // This routine also takes the intersection of C1 and C2, but it does so by 678 // altering the VarDefinitions. C1 must be the result of an earlier call to 679 // createReferenceContext. 680 void LocalVariableMap::intersectBackEdge(Context C1, Context C2) { 681 for (const auto &P : C1) { 682 unsigned i1 = P.second; 683 VarDefinition *VDef = &VarDefinitions[i1]; 684 assert(VDef->isReference()); 685 686 const unsigned *i2 = C2.lookup(P.first); 687 if (!i2 || (*i2 != i1)) 688 VDef->Ref = 0; // Mark this variable as undefined 689 } 690 } 691 692 // Traverse the CFG in topological order, so all predecessors of a block 693 // (excluding back-edges) are visited before the block itself. At 694 // each point in the code, we calculate a Context, which holds the set of 695 // variable definitions which are visible at that point in execution. 696 // Visible variables are mapped to their definitions using an array that 697 // contains all definitions. 698 // 699 // At join points in the CFG, the set is computed as the intersection of 700 // the incoming sets along each edge, E.g. 701 // 702 // { Context | VarDefinitions } 703 // int x = 0; { x -> x1 | x1 = 0 } 704 // int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 } 705 // if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... } 706 // else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... } 707 // ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... } 708 // 709 // This is essentially a simpler and more naive version of the standard SSA 710 // algorithm. Those definitions that remain in the intersection are from blocks 711 // that strictly dominate the current block. We do not bother to insert proper 712 // phi nodes, because they are not used in our analysis; instead, wherever 713 // a phi node would be required, we simply remove that definition from the 714 // context (E.g. x above). 715 // 716 // The initial traversal does not capture back-edges, so those need to be 717 // handled on a separate pass. Whenever the first pass encounters an 718 // incoming back edge, it duplicates the context, creating new definitions 719 // that refer back to the originals. (These correspond to places where SSA 720 // might have to insert a phi node.) On the second pass, these definitions are 721 // set to NULL if the variable has changed on the back-edge (i.e. a phi 722 // node was actually required.) E.g. 723 // 724 // { Context | VarDefinitions } 725 // int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 } 726 // while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; } 727 // x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... } 728 // ... { y -> y1 | x3 = 2, x2 = 1, ... } 729 void LocalVariableMap::traverseCFG(CFG *CFGraph, 730 const PostOrderCFGView *SortedGraph, 731 std::vector<CFGBlockInfo> &BlockInfo) { 732 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph); 733 734 CtxIndices.resize(CFGraph->getNumBlockIDs()); 735 736 for (const auto *CurrBlock : *SortedGraph) { 737 unsigned CurrBlockID = CurrBlock->getBlockID(); 738 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID]; 739 740 VisitedBlocks.insert(CurrBlock); 741 742 // Calculate the entry context for the current block 743 bool HasBackEdges = false; 744 bool CtxInit = true; 745 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), 746 PE = CurrBlock->pred_end(); PI != PE; ++PI) { 747 // if *PI -> CurrBlock is a back edge, so skip it 748 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) { 749 HasBackEdges = true; 750 continue; 751 } 752 753 unsigned PrevBlockID = (*PI)->getBlockID(); 754 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; 755 756 if (CtxInit) { 757 CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext; 758 CtxInit = false; 759 } 760 else { 761 CurrBlockInfo->EntryContext = 762 intersectContexts(CurrBlockInfo->EntryContext, 763 PrevBlockInfo->ExitContext); 764 } 765 } 766 767 // Duplicate the context if we have back-edges, so we can call 768 // intersectBackEdges later. 769 if (HasBackEdges) 770 CurrBlockInfo->EntryContext = 771 createReferenceContext(CurrBlockInfo->EntryContext); 772 773 // Create a starting context index for the current block 774 saveContext(nullptr, CurrBlockInfo->EntryContext); 775 CurrBlockInfo->EntryIndex = getContextIndex(); 776 777 // Visit all the statements in the basic block. 778 VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext); 779 for (const auto &BI : *CurrBlock) { 780 switch (BI.getKind()) { 781 case CFGElement::Statement: { 782 CFGStmt CS = BI.castAs<CFGStmt>(); 783 VMapBuilder.Visit(CS.getStmt()); 784 break; 785 } 786 default: 787 break; 788 } 789 } 790 CurrBlockInfo->ExitContext = VMapBuilder.Ctx; 791 792 // Mark variables on back edges as "unknown" if they've been changed. 793 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), 794 SE = CurrBlock->succ_end(); SI != SE; ++SI) { 795 // if CurrBlock -> *SI is *not* a back edge 796 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI)) 797 continue; 798 799 CFGBlock *FirstLoopBlock = *SI; 800 Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext; 801 Context LoopEnd = CurrBlockInfo->ExitContext; 802 intersectBackEdge(LoopBegin, LoopEnd); 803 } 804 } 805 806 // Put an extra entry at the end of the indexed context array 807 unsigned exitID = CFGraph->getExit().getBlockID(); 808 saveContext(nullptr, BlockInfo[exitID].ExitContext); 809 } 810 811 /// Find the appropriate source locations to use when producing diagnostics for 812 /// each block in the CFG. 813 static void findBlockLocations(CFG *CFGraph, 814 const PostOrderCFGView *SortedGraph, 815 std::vector<CFGBlockInfo> &BlockInfo) { 816 for (const auto *CurrBlock : *SortedGraph) { 817 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()]; 818 819 // Find the source location of the last statement in the block, if the 820 // block is not empty. 821 if (const Stmt *S = CurrBlock->getTerminatorStmt()) { 822 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc(); 823 } else { 824 for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(), 825 BE = CurrBlock->rend(); BI != BE; ++BI) { 826 // FIXME: Handle other CFGElement kinds. 827 if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) { 828 CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc(); 829 break; 830 } 831 } 832 } 833 834 if (CurrBlockInfo->ExitLoc.isValid()) { 835 // This block contains at least one statement. Find the source location 836 // of the first statement in the block. 837 for (const auto &BI : *CurrBlock) { 838 // FIXME: Handle other CFGElement kinds. 839 if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) { 840 CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc(); 841 break; 842 } 843 } 844 } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() && 845 CurrBlock != &CFGraph->getExit()) { 846 // The block is empty, and has a single predecessor. Use its exit 847 // location. 848 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = 849 BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc; 850 } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) { 851 // The block is empty, and has a single successor. Use its entry 852 // location. 853 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = 854 BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc; 855 } 856 } 857 } 858 859 namespace { 860 861 class LockableFactEntry : public FactEntry { 862 public: 863 LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc, 864 SourceKind Src = Acquired) 865 : FactEntry(CE, LK, Loc, Src) {} 866 867 void 868 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 869 SourceLocation JoinLoc, LockErrorKind LEK, 870 ThreadSafetyHandler &Handler) const override { 871 if (!asserted() && !negative() && !isUniversal()) { 872 Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc, 873 LEK); 874 } 875 } 876 877 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry, 878 ThreadSafetyHandler &Handler, 879 StringRef DiagKind) const override { 880 Handler.handleDoubleLock(DiagKind, entry.toString(), loc(), entry.loc()); 881 } 882 883 void handleUnlock(FactSet &FSet, FactManager &FactMan, 884 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 885 bool FullyRemove, ThreadSafetyHandler &Handler, 886 StringRef DiagKind) const override { 887 FSet.removeLock(FactMan, Cp); 888 if (!Cp.negative()) { 889 FSet.addLock(FactMan, std::make_unique<LockableFactEntry>( 890 !Cp, LK_Exclusive, UnlockLoc)); 891 } 892 } 893 }; 894 895 class ScopedLockableFactEntry : public FactEntry { 896 private: 897 enum UnderlyingCapabilityKind { 898 UCK_Acquired, ///< Any kind of acquired capability. 899 UCK_ReleasedShared, ///< Shared capability that was released. 900 UCK_ReleasedExclusive, ///< Exclusive capability that was released. 901 }; 902 903 using UnderlyingCapability = 904 llvm::PointerIntPair<const til::SExpr *, 2, UnderlyingCapabilityKind>; 905 906 SmallVector<UnderlyingCapability, 4> UnderlyingMutexes; 907 908 public: 909 ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc) 910 : FactEntry(CE, LK_Exclusive, Loc, Acquired) {} 911 912 void addLock(const CapabilityExpr &M) { 913 UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired); 914 } 915 916 void addExclusiveUnlock(const CapabilityExpr &M) { 917 UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedExclusive); 918 } 919 920 void addSharedUnlock(const CapabilityExpr &M) { 921 UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedShared); 922 } 923 924 void 925 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 926 SourceLocation JoinLoc, LockErrorKind LEK, 927 ThreadSafetyHandler &Handler) const override { 928 for (const auto &UnderlyingMutex : UnderlyingMutexes) { 929 const auto *Entry = FSet.findLock( 930 FactMan, CapabilityExpr(UnderlyingMutex.getPointer(), false)); 931 if ((UnderlyingMutex.getInt() == UCK_Acquired && Entry) || 932 (UnderlyingMutex.getInt() != UCK_Acquired && !Entry)) { 933 // If this scoped lock manages another mutex, and if the underlying 934 // mutex is still/not held, then warn about the underlying mutex. 935 Handler.handleMutexHeldEndOfScope( 936 "mutex", sx::toString(UnderlyingMutex.getPointer()), loc(), JoinLoc, 937 LEK); 938 } 939 } 940 } 941 942 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry, 943 ThreadSafetyHandler &Handler, 944 StringRef DiagKind) const override { 945 for (const auto &UnderlyingMutex : UnderlyingMutexes) { 946 CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false); 947 948 if (UnderlyingMutex.getInt() == UCK_Acquired) 949 lock(FSet, FactMan, UnderCp, entry.kind(), entry.loc(), &Handler, 950 DiagKind); 951 else 952 unlock(FSet, FactMan, UnderCp, entry.loc(), &Handler, DiagKind); 953 } 954 } 955 956 void handleUnlock(FactSet &FSet, FactManager &FactMan, 957 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 958 bool FullyRemove, ThreadSafetyHandler &Handler, 959 StringRef DiagKind) const override { 960 assert(!Cp.negative() && "Managing object cannot be negative."); 961 for (const auto &UnderlyingMutex : UnderlyingMutexes) { 962 CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false); 963 964 // Remove/lock the underlying mutex if it exists/is still unlocked; warn 965 // on double unlocking/locking if we're not destroying the scoped object. 966 ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler; 967 if (UnderlyingMutex.getInt() == UCK_Acquired) { 968 unlock(FSet, FactMan, UnderCp, UnlockLoc, TSHandler, DiagKind); 969 } else { 970 LockKind kind = UnderlyingMutex.getInt() == UCK_ReleasedShared 971 ? LK_Shared 972 : LK_Exclusive; 973 lock(FSet, FactMan, UnderCp, kind, UnlockLoc, TSHandler, DiagKind); 974 } 975 } 976 if (FullyRemove) 977 FSet.removeLock(FactMan, Cp); 978 } 979 980 private: 981 void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp, 982 LockKind kind, SourceLocation loc, ThreadSafetyHandler *Handler, 983 StringRef DiagKind) const { 984 if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) { 985 if (Handler) 986 Handler->handleDoubleLock(DiagKind, Cp.toString(), Fact->loc(), loc); 987 } else { 988 FSet.removeLock(FactMan, !Cp); 989 FSet.addLock(FactMan, 990 std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed)); 991 } 992 } 993 994 void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp, 995 SourceLocation loc, ThreadSafetyHandler *Handler, 996 StringRef DiagKind) const { 997 if (FSet.findLock(FactMan, Cp)) { 998 FSet.removeLock(FactMan, Cp); 999 FSet.addLock(FactMan, std::make_unique<LockableFactEntry>( 1000 !Cp, LK_Exclusive, loc)); 1001 } else if (Handler) { 1002 SourceLocation PrevLoc; 1003 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp)) 1004 PrevLoc = Neg->loc(); 1005 Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc, PrevLoc); 1006 } 1007 } 1008 }; 1009 1010 /// Class which implements the core thread safety analysis routines. 1011 class ThreadSafetyAnalyzer { 1012 friend class BuildLockset; 1013 friend class threadSafety::BeforeSet; 1014 1015 llvm::BumpPtrAllocator Bpa; 1016 threadSafety::til::MemRegionRef Arena; 1017 threadSafety::SExprBuilder SxBuilder; 1018 1019 ThreadSafetyHandler &Handler; 1020 const CXXMethodDecl *CurrentMethod; 1021 LocalVariableMap LocalVarMap; 1022 FactManager FactMan; 1023 std::vector<CFGBlockInfo> BlockInfo; 1024 1025 BeforeSet *GlobalBeforeSet; 1026 1027 public: 1028 ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset) 1029 : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {} 1030 1031 bool inCurrentScope(const CapabilityExpr &CapE); 1032 1033 void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry, 1034 StringRef DiagKind, bool ReqAttr = false); 1035 void removeLock(FactSet &FSet, const CapabilityExpr &CapE, 1036 SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind, 1037 StringRef DiagKind); 1038 1039 template <typename AttrType> 1040 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp, 1041 const NamedDecl *D, VarDecl *SelfDecl = nullptr); 1042 1043 template <class AttrType> 1044 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp, 1045 const NamedDecl *D, 1046 const CFGBlock *PredBlock, const CFGBlock *CurrBlock, 1047 Expr *BrE, bool Neg); 1048 1049 const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C, 1050 bool &Negate); 1051 1052 void getEdgeLockset(FactSet &Result, const FactSet &ExitSet, 1053 const CFGBlock* PredBlock, 1054 const CFGBlock *CurrBlock); 1055 1056 bool join(const FactEntry &a, const FactEntry &b, bool CanModify); 1057 1058 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet, 1059 SourceLocation JoinLoc, LockErrorKind EntryLEK, 1060 LockErrorKind ExitLEK); 1061 1062 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet, 1063 SourceLocation JoinLoc, LockErrorKind LEK) { 1064 intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK); 1065 } 1066 1067 void runAnalysis(AnalysisDeclContext &AC); 1068 }; 1069 1070 } // namespace 1071 1072 /// Process acquired_before and acquired_after attributes on Vd. 1073 BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd, 1074 ThreadSafetyAnalyzer& Analyzer) { 1075 // Create a new entry for Vd. 1076 BeforeInfo *Info = nullptr; 1077 { 1078 // Keep InfoPtr in its own scope in case BMap is modified later and the 1079 // reference becomes invalid. 1080 std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd]; 1081 if (!InfoPtr) 1082 InfoPtr.reset(new BeforeInfo()); 1083 Info = InfoPtr.get(); 1084 } 1085 1086 for (const auto *At : Vd->attrs()) { 1087 switch (At->getKind()) { 1088 case attr::AcquiredBefore: { 1089 const auto *A = cast<AcquiredBeforeAttr>(At); 1090 1091 // Read exprs from the attribute, and add them to BeforeVect. 1092 for (const auto *Arg : A->args()) { 1093 CapabilityExpr Cp = 1094 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr); 1095 if (const ValueDecl *Cpvd = Cp.valueDecl()) { 1096 Info->Vect.push_back(Cpvd); 1097 const auto It = BMap.find(Cpvd); 1098 if (It == BMap.end()) 1099 insertAttrExprs(Cpvd, Analyzer); 1100 } 1101 } 1102 break; 1103 } 1104 case attr::AcquiredAfter: { 1105 const auto *A = cast<AcquiredAfterAttr>(At); 1106 1107 // Read exprs from the attribute, and add them to BeforeVect. 1108 for (const auto *Arg : A->args()) { 1109 CapabilityExpr Cp = 1110 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr); 1111 if (const ValueDecl *ArgVd = Cp.valueDecl()) { 1112 // Get entry for mutex listed in attribute 1113 BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer); 1114 ArgInfo->Vect.push_back(Vd); 1115 } 1116 } 1117 break; 1118 } 1119 default: 1120 break; 1121 } 1122 } 1123 1124 return Info; 1125 } 1126 1127 BeforeSet::BeforeInfo * 1128 BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd, 1129 ThreadSafetyAnalyzer &Analyzer) { 1130 auto It = BMap.find(Vd); 1131 BeforeInfo *Info = nullptr; 1132 if (It == BMap.end()) 1133 Info = insertAttrExprs(Vd, Analyzer); 1134 else 1135 Info = It->second.get(); 1136 assert(Info && "BMap contained nullptr?"); 1137 return Info; 1138 } 1139 1140 /// Return true if any mutexes in FSet are in the acquired_before set of Vd. 1141 void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd, 1142 const FactSet& FSet, 1143 ThreadSafetyAnalyzer& Analyzer, 1144 SourceLocation Loc, StringRef CapKind) { 1145 SmallVector<BeforeInfo*, 8> InfoVect; 1146 1147 // Do a depth-first traversal of Vd. 1148 // Return true if there are cycles. 1149 std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) { 1150 if (!Vd) 1151 return false; 1152 1153 BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer); 1154 1155 if (Info->Visited == 1) 1156 return true; 1157 1158 if (Info->Visited == 2) 1159 return false; 1160 1161 if (Info->Vect.empty()) 1162 return false; 1163 1164 InfoVect.push_back(Info); 1165 Info->Visited = 1; 1166 for (const auto *Vdb : Info->Vect) { 1167 // Exclude mutexes in our immediate before set. 1168 if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) { 1169 StringRef L1 = StartVd->getName(); 1170 StringRef L2 = Vdb->getName(); 1171 Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc); 1172 } 1173 // Transitively search other before sets, and warn on cycles. 1174 if (traverse(Vdb)) { 1175 if (CycMap.find(Vd) == CycMap.end()) { 1176 CycMap.insert(std::make_pair(Vd, true)); 1177 StringRef L1 = Vd->getName(); 1178 Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation()); 1179 } 1180 } 1181 } 1182 Info->Visited = 2; 1183 return false; 1184 }; 1185 1186 traverse(StartVd); 1187 1188 for (auto *Info : InfoVect) 1189 Info->Visited = 0; 1190 } 1191 1192 /// Gets the value decl pointer from DeclRefExprs or MemberExprs. 1193 static const ValueDecl *getValueDecl(const Expr *Exp) { 1194 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp)) 1195 return getValueDecl(CE->getSubExpr()); 1196 1197 if (const auto *DR = dyn_cast<DeclRefExpr>(Exp)) 1198 return DR->getDecl(); 1199 1200 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) 1201 return ME->getMemberDecl(); 1202 1203 return nullptr; 1204 } 1205 1206 namespace { 1207 1208 template <typename Ty> 1209 class has_arg_iterator_range { 1210 using yes = char[1]; 1211 using no = char[2]; 1212 1213 template <typename Inner> 1214 static yes& test(Inner *I, decltype(I->args()) * = nullptr); 1215 1216 template <typename> 1217 static no& test(...); 1218 1219 public: 1220 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes); 1221 }; 1222 1223 } // namespace 1224 1225 static StringRef ClassifyDiagnostic(const CapabilityAttr *A) { 1226 return A->getName(); 1227 } 1228 1229 static StringRef ClassifyDiagnostic(QualType VDT) { 1230 // We need to look at the declaration of the type of the value to determine 1231 // which it is. The type should either be a record or a typedef, or a pointer 1232 // or reference thereof. 1233 if (const auto *RT = VDT->getAs<RecordType>()) { 1234 if (const auto *RD = RT->getDecl()) 1235 if (const auto *CA = RD->getAttr<CapabilityAttr>()) 1236 return ClassifyDiagnostic(CA); 1237 } else if (const auto *TT = VDT->getAs<TypedefType>()) { 1238 if (const auto *TD = TT->getDecl()) 1239 if (const auto *CA = TD->getAttr<CapabilityAttr>()) 1240 return ClassifyDiagnostic(CA); 1241 } else if (VDT->isPointerType() || VDT->isReferenceType()) 1242 return ClassifyDiagnostic(VDT->getPointeeType()); 1243 1244 return "mutex"; 1245 } 1246 1247 static StringRef ClassifyDiagnostic(const ValueDecl *VD) { 1248 assert(VD && "No ValueDecl passed"); 1249 1250 // The ValueDecl is the declaration of a mutex or role (hopefully). 1251 return ClassifyDiagnostic(VD->getType()); 1252 } 1253 1254 template <typename AttrTy> 1255 static std::enable_if_t<!has_arg_iterator_range<AttrTy>::value, StringRef> 1256 ClassifyDiagnostic(const AttrTy *A) { 1257 if (const ValueDecl *VD = getValueDecl(A->getArg())) 1258 return ClassifyDiagnostic(VD); 1259 return "mutex"; 1260 } 1261 1262 template <typename AttrTy> 1263 static std::enable_if_t<has_arg_iterator_range<AttrTy>::value, StringRef> 1264 ClassifyDiagnostic(const AttrTy *A) { 1265 for (const auto *Arg : A->args()) { 1266 if (const ValueDecl *VD = getValueDecl(Arg)) 1267 return ClassifyDiagnostic(VD); 1268 } 1269 return "mutex"; 1270 } 1271 1272 bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) { 1273 const threadSafety::til::SExpr *SExp = CapE.sexpr(); 1274 assert(SExp && "Null expressions should be ignored"); 1275 1276 if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) { 1277 const ValueDecl *VD = LP->clangDecl(); 1278 // Variables defined in a function are always inaccessible. 1279 if (!VD->isDefinedOutsideFunctionOrMethod()) 1280 return false; 1281 // For now we consider static class members to be inaccessible. 1282 if (isa<CXXRecordDecl>(VD->getDeclContext())) 1283 return false; 1284 // Global variables are always in scope. 1285 return true; 1286 } 1287 1288 // Members are in scope from methods of the same class. 1289 if (const auto *P = dyn_cast<til::Project>(SExp)) { 1290 if (!CurrentMethod) 1291 return false; 1292 const ValueDecl *VD = P->clangDecl(); 1293 return VD->getDeclContext() == CurrentMethod->getDeclContext(); 1294 } 1295 1296 return false; 1297 } 1298 1299 /// Add a new lock to the lockset, warning if the lock is already there. 1300 /// \param ReqAttr -- true if this is part of an initial Requires attribute. 1301 void ThreadSafetyAnalyzer::addLock(FactSet &FSet, 1302 std::unique_ptr<FactEntry> Entry, 1303 StringRef DiagKind, bool ReqAttr) { 1304 if (Entry->shouldIgnore()) 1305 return; 1306 1307 if (!ReqAttr && !Entry->negative()) { 1308 // look for the negative capability, and remove it from the fact set. 1309 CapabilityExpr NegC = !*Entry; 1310 const FactEntry *Nen = FSet.findLock(FactMan, NegC); 1311 if (Nen) { 1312 FSet.removeLock(FactMan, NegC); 1313 } 1314 else { 1315 if (inCurrentScope(*Entry) && !Entry->asserted()) 1316 Handler.handleNegativeNotHeld(DiagKind, Entry->toString(), 1317 NegC.toString(), Entry->loc()); 1318 } 1319 } 1320 1321 // Check before/after constraints 1322 if (Handler.issueBetaWarnings() && 1323 !Entry->asserted() && !Entry->declared()) { 1324 GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this, 1325 Entry->loc(), DiagKind); 1326 } 1327 1328 // FIXME: Don't always warn when we have support for reentrant locks. 1329 if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) { 1330 if (!Entry->asserted()) 1331 Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind); 1332 } else { 1333 FSet.addLock(FactMan, std::move(Entry)); 1334 } 1335 } 1336 1337 /// Remove a lock from the lockset, warning if the lock is not there. 1338 /// \param UnlockLoc The source location of the unlock (only used in error msg) 1339 void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp, 1340 SourceLocation UnlockLoc, 1341 bool FullyRemove, LockKind ReceivedKind, 1342 StringRef DiagKind) { 1343 if (Cp.shouldIgnore()) 1344 return; 1345 1346 const FactEntry *LDat = FSet.findLock(FactMan, Cp); 1347 if (!LDat) { 1348 SourceLocation PrevLoc; 1349 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp)) 1350 PrevLoc = Neg->loc(); 1351 Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc, PrevLoc); 1352 return; 1353 } 1354 1355 // Generic lock removal doesn't care about lock kind mismatches, but 1356 // otherwise diagnose when the lock kinds are mismatched. 1357 if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) { 1358 Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(), LDat->kind(), 1359 ReceivedKind, LDat->loc(), UnlockLoc); 1360 } 1361 1362 LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler, 1363 DiagKind); 1364 } 1365 1366 /// Extract the list of mutexIDs from the attribute on an expression, 1367 /// and push them onto Mtxs, discarding any duplicates. 1368 template <typename AttrType> 1369 void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, 1370 const Expr *Exp, const NamedDecl *D, 1371 VarDecl *SelfDecl) { 1372 if (Attr->args_size() == 0) { 1373 // The mutex held is the "this" object. 1374 CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl); 1375 if (Cp.isInvalid()) { 1376 warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr)); 1377 return; 1378 } 1379 //else 1380 if (!Cp.shouldIgnore()) 1381 Mtxs.push_back_nodup(Cp); 1382 return; 1383 } 1384 1385 for (const auto *Arg : Attr->args()) { 1386 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl); 1387 if (Cp.isInvalid()) { 1388 warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr)); 1389 continue; 1390 } 1391 //else 1392 if (!Cp.shouldIgnore()) 1393 Mtxs.push_back_nodup(Cp); 1394 } 1395 } 1396 1397 /// Extract the list of mutexIDs from a trylock attribute. If the 1398 /// trylock applies to the given edge, then push them onto Mtxs, discarding 1399 /// any duplicates. 1400 template <class AttrType> 1401 void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, 1402 const Expr *Exp, const NamedDecl *D, 1403 const CFGBlock *PredBlock, 1404 const CFGBlock *CurrBlock, 1405 Expr *BrE, bool Neg) { 1406 // Find out which branch has the lock 1407 bool branch = false; 1408 if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) 1409 branch = BLE->getValue(); 1410 else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) 1411 branch = ILE->getValue().getBoolValue(); 1412 1413 int branchnum = branch ? 0 : 1; 1414 if (Neg) 1415 branchnum = !branchnum; 1416 1417 // If we've taken the trylock branch, then add the lock 1418 int i = 0; 1419 for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(), 1420 SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) { 1421 if (*SI == CurrBlock && i == branchnum) 1422 getMutexIDs(Mtxs, Attr, Exp, D); 1423 } 1424 } 1425 1426 static bool getStaticBooleanValue(Expr *E, bool &TCond) { 1427 if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) { 1428 TCond = false; 1429 return true; 1430 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) { 1431 TCond = BLE->getValue(); 1432 return true; 1433 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) { 1434 TCond = ILE->getValue().getBoolValue(); 1435 return true; 1436 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E)) 1437 return getStaticBooleanValue(CE->getSubExpr(), TCond); 1438 return false; 1439 } 1440 1441 // If Cond can be traced back to a function call, return the call expression. 1442 // The negate variable should be called with false, and will be set to true 1443 // if the function call is negated, e.g. if (!mu.tryLock(...)) 1444 const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond, 1445 LocalVarContext C, 1446 bool &Negate) { 1447 if (!Cond) 1448 return nullptr; 1449 1450 if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) { 1451 if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect) 1452 return getTrylockCallExpr(CallExp->getArg(0), C, Negate); 1453 return CallExp; 1454 } 1455 else if (const auto *PE = dyn_cast<ParenExpr>(Cond)) 1456 return getTrylockCallExpr(PE->getSubExpr(), C, Negate); 1457 else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond)) 1458 return getTrylockCallExpr(CE->getSubExpr(), C, Negate); 1459 else if (const auto *FE = dyn_cast<FullExpr>(Cond)) 1460 return getTrylockCallExpr(FE->getSubExpr(), C, Negate); 1461 else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) { 1462 const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C); 1463 return getTrylockCallExpr(E, C, Negate); 1464 } 1465 else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) { 1466 if (UOP->getOpcode() == UO_LNot) { 1467 Negate = !Negate; 1468 return getTrylockCallExpr(UOP->getSubExpr(), C, Negate); 1469 } 1470 return nullptr; 1471 } 1472 else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) { 1473 if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) { 1474 if (BOP->getOpcode() == BO_NE) 1475 Negate = !Negate; 1476 1477 bool TCond = false; 1478 if (getStaticBooleanValue(BOP->getRHS(), TCond)) { 1479 if (!TCond) Negate = !Negate; 1480 return getTrylockCallExpr(BOP->getLHS(), C, Negate); 1481 } 1482 TCond = false; 1483 if (getStaticBooleanValue(BOP->getLHS(), TCond)) { 1484 if (!TCond) Negate = !Negate; 1485 return getTrylockCallExpr(BOP->getRHS(), C, Negate); 1486 } 1487 return nullptr; 1488 } 1489 if (BOP->getOpcode() == BO_LAnd) { 1490 // LHS must have been evaluated in a different block. 1491 return getTrylockCallExpr(BOP->getRHS(), C, Negate); 1492 } 1493 if (BOP->getOpcode() == BO_LOr) 1494 return getTrylockCallExpr(BOP->getRHS(), C, Negate); 1495 return nullptr; 1496 } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) { 1497 bool TCond, FCond; 1498 if (getStaticBooleanValue(COP->getTrueExpr(), TCond) && 1499 getStaticBooleanValue(COP->getFalseExpr(), FCond)) { 1500 if (TCond && !FCond) 1501 return getTrylockCallExpr(COP->getCond(), C, Negate); 1502 if (!TCond && FCond) { 1503 Negate = !Negate; 1504 return getTrylockCallExpr(COP->getCond(), C, Negate); 1505 } 1506 } 1507 } 1508 return nullptr; 1509 } 1510 1511 /// Find the lockset that holds on the edge between PredBlock 1512 /// and CurrBlock. The edge set is the exit set of PredBlock (passed 1513 /// as the ExitSet parameter) plus any trylocks, which are conditionally held. 1514 void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result, 1515 const FactSet &ExitSet, 1516 const CFGBlock *PredBlock, 1517 const CFGBlock *CurrBlock) { 1518 Result = ExitSet; 1519 1520 const Stmt *Cond = PredBlock->getTerminatorCondition(); 1521 // We don't acquire try-locks on ?: branches, only when its result is used. 1522 if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt())) 1523 return; 1524 1525 bool Negate = false; 1526 const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()]; 1527 const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext; 1528 StringRef CapDiagKind = "mutex"; 1529 1530 const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate); 1531 if (!Exp) 1532 return; 1533 1534 auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); 1535 if(!FunDecl || !FunDecl->hasAttrs()) 1536 return; 1537 1538 CapExprSet ExclusiveLocksToAdd; 1539 CapExprSet SharedLocksToAdd; 1540 1541 // If the condition is a call to a Trylock function, then grab the attributes 1542 for (const auto *Attr : FunDecl->attrs()) { 1543 switch (Attr->getKind()) { 1544 case attr::TryAcquireCapability: { 1545 auto *A = cast<TryAcquireCapabilityAttr>(Attr); 1546 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 1547 Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(), 1548 Negate); 1549 CapDiagKind = ClassifyDiagnostic(A); 1550 break; 1551 }; 1552 case attr::ExclusiveTrylockFunction: { 1553 const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr); 1554 getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl, 1555 PredBlock, CurrBlock, A->getSuccessValue(), Negate); 1556 CapDiagKind = ClassifyDiagnostic(A); 1557 break; 1558 } 1559 case attr::SharedTrylockFunction: { 1560 const auto *A = cast<SharedTrylockFunctionAttr>(Attr); 1561 getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl, 1562 PredBlock, CurrBlock, A->getSuccessValue(), Negate); 1563 CapDiagKind = ClassifyDiagnostic(A); 1564 break; 1565 } 1566 default: 1567 break; 1568 } 1569 } 1570 1571 // Add and remove locks. 1572 SourceLocation Loc = Exp->getExprLoc(); 1573 for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd) 1574 addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd, 1575 LK_Exclusive, Loc), 1576 CapDiagKind); 1577 for (const auto &SharedLockToAdd : SharedLocksToAdd) 1578 addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd, 1579 LK_Shared, Loc), 1580 CapDiagKind); 1581 } 1582 1583 namespace { 1584 1585 /// We use this class to visit different types of expressions in 1586 /// CFGBlocks, and build up the lockset. 1587 /// An expression may cause us to add or remove locks from the lockset, or else 1588 /// output error messages related to missing locks. 1589 /// FIXME: In future, we may be able to not inherit from a visitor. 1590 class BuildLockset : public ConstStmtVisitor<BuildLockset> { 1591 friend class ThreadSafetyAnalyzer; 1592 1593 ThreadSafetyAnalyzer *Analyzer; 1594 FactSet FSet; 1595 LocalVariableMap::Context LVarCtx; 1596 unsigned CtxIndex; 1597 1598 // helper functions 1599 void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK, 1600 Expr *MutexExp, ProtectedOperationKind POK, 1601 StringRef DiagKind, SourceLocation Loc); 1602 void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp, 1603 StringRef DiagKind); 1604 1605 void checkAccess(const Expr *Exp, AccessKind AK, 1606 ProtectedOperationKind POK = POK_VarAccess); 1607 void checkPtAccess(const Expr *Exp, AccessKind AK, 1608 ProtectedOperationKind POK = POK_VarAccess); 1609 1610 void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr); 1611 void examineArguments(const FunctionDecl *FD, 1612 CallExpr::const_arg_iterator ArgBegin, 1613 CallExpr::const_arg_iterator ArgEnd, 1614 bool SkipFirstParam = false); 1615 1616 public: 1617 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info) 1618 : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet), 1619 LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {} 1620 1621 void VisitUnaryOperator(const UnaryOperator *UO); 1622 void VisitBinaryOperator(const BinaryOperator *BO); 1623 void VisitCastExpr(const CastExpr *CE); 1624 void VisitCallExpr(const CallExpr *Exp); 1625 void VisitCXXConstructExpr(const CXXConstructExpr *Exp); 1626 void VisitDeclStmt(const DeclStmt *S); 1627 }; 1628 1629 } // namespace 1630 1631 /// Warn if the LSet does not contain a lock sufficient to protect access 1632 /// of at least the passed in AccessKind. 1633 void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, 1634 AccessKind AK, Expr *MutexExp, 1635 ProtectedOperationKind POK, 1636 StringRef DiagKind, SourceLocation Loc) { 1637 LockKind LK = getLockKindFromAccessKind(AK); 1638 1639 CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp); 1640 if (Cp.isInvalid()) { 1641 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind); 1642 return; 1643 } else if (Cp.shouldIgnore()) { 1644 return; 1645 } 1646 1647 if (Cp.negative()) { 1648 // Negative capabilities act like locks excluded 1649 const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp); 1650 if (LDat) { 1651 Analyzer->Handler.handleFunExcludesLock( 1652 DiagKind, D->getNameAsString(), (!Cp).toString(), Loc); 1653 return; 1654 } 1655 1656 // If this does not refer to a negative capability in the same class, 1657 // then stop here. 1658 if (!Analyzer->inCurrentScope(Cp)) 1659 return; 1660 1661 // Otherwise the negative requirement must be propagated to the caller. 1662 LDat = FSet.findLock(Analyzer->FactMan, Cp); 1663 if (!LDat) { 1664 Analyzer->Handler.handleNegativeNotHeld(D, Cp.toString(), Loc); 1665 } 1666 return; 1667 } 1668 1669 const FactEntry *LDat = FSet.findLockUniv(Analyzer->FactMan, Cp); 1670 bool NoError = true; 1671 if (!LDat) { 1672 // No exact match found. Look for a partial match. 1673 LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp); 1674 if (LDat) { 1675 // Warn that there's no precise match. 1676 std::string PartMatchStr = LDat->toString(); 1677 StringRef PartMatchName(PartMatchStr); 1678 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(), 1679 LK, Loc, &PartMatchName); 1680 } else { 1681 // Warn that there's no match at all. 1682 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(), 1683 LK, Loc); 1684 } 1685 NoError = false; 1686 } 1687 // Make sure the mutex we found is the right kind. 1688 if (NoError && LDat && !LDat->isAtLeast(LK)) { 1689 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(), 1690 LK, Loc); 1691 } 1692 } 1693 1694 /// Warn if the LSet contains the given lock. 1695 void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, 1696 Expr *MutexExp, StringRef DiagKind) { 1697 CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp); 1698 if (Cp.isInvalid()) { 1699 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind); 1700 return; 1701 } else if (Cp.shouldIgnore()) { 1702 return; 1703 } 1704 1705 const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp); 1706 if (LDat) { 1707 Analyzer->Handler.handleFunExcludesLock( 1708 DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc()); 1709 } 1710 } 1711 1712 /// Checks guarded_by and pt_guarded_by attributes. 1713 /// Whenever we identify an access (read or write) to a DeclRefExpr that is 1714 /// marked with guarded_by, we must ensure the appropriate mutexes are held. 1715 /// Similarly, we check if the access is to an expression that dereferences 1716 /// a pointer marked with pt_guarded_by. 1717 void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK, 1718 ProtectedOperationKind POK) { 1719 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts(); 1720 1721 SourceLocation Loc = Exp->getExprLoc(); 1722 1723 // Local variables of reference type cannot be re-assigned; 1724 // map them to their initializer. 1725 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) { 1726 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl()); 1727 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) { 1728 if (const auto *E = VD->getInit()) { 1729 // Guard against self-initialization. e.g., int &i = i; 1730 if (E == Exp) 1731 break; 1732 Exp = E; 1733 continue; 1734 } 1735 } 1736 break; 1737 } 1738 1739 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) { 1740 // For dereferences 1741 if (UO->getOpcode() == UO_Deref) 1742 checkPtAccess(UO->getSubExpr(), AK, POK); 1743 return; 1744 } 1745 1746 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) { 1747 checkPtAccess(AE->getLHS(), AK, POK); 1748 return; 1749 } 1750 1751 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) { 1752 if (ME->isArrow()) 1753 checkPtAccess(ME->getBase(), AK, POK); 1754 else 1755 checkAccess(ME->getBase(), AK, POK); 1756 } 1757 1758 const ValueDecl *D = getValueDecl(Exp); 1759 if (!D || !D->hasAttrs()) 1760 return; 1761 1762 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) { 1763 Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc); 1764 } 1765 1766 for (const auto *I : D->specific_attrs<GuardedByAttr>()) 1767 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK, 1768 ClassifyDiagnostic(I), Loc); 1769 } 1770 1771 /// Checks pt_guarded_by and pt_guarded_var attributes. 1772 /// POK is the same operationKind that was passed to checkAccess. 1773 void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK, 1774 ProtectedOperationKind POK) { 1775 while (true) { 1776 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) { 1777 Exp = PE->getSubExpr(); 1778 continue; 1779 } 1780 if (const auto *CE = dyn_cast<CastExpr>(Exp)) { 1781 if (CE->getCastKind() == CK_ArrayToPointerDecay) { 1782 // If it's an actual array, and not a pointer, then it's elements 1783 // are protected by GUARDED_BY, not PT_GUARDED_BY; 1784 checkAccess(CE->getSubExpr(), AK, POK); 1785 return; 1786 } 1787 Exp = CE->getSubExpr(); 1788 continue; 1789 } 1790 break; 1791 } 1792 1793 // Pass by reference warnings are under a different flag. 1794 ProtectedOperationKind PtPOK = POK_VarDereference; 1795 if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef; 1796 1797 const ValueDecl *D = getValueDecl(Exp); 1798 if (!D || !D->hasAttrs()) 1799 return; 1800 1801 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) 1802 Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK, 1803 Exp->getExprLoc()); 1804 1805 for (auto const *I : D->specific_attrs<PtGuardedByAttr>()) 1806 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK, 1807 ClassifyDiagnostic(I), Exp->getExprLoc()); 1808 } 1809 1810 /// Process a function call, method call, constructor call, 1811 /// or destructor call. This involves looking at the attributes on the 1812 /// corresponding function/method/constructor/destructor, issuing warnings, 1813 /// and updating the locksets accordingly. 1814 /// 1815 /// FIXME: For classes annotated with one of the guarded annotations, we need 1816 /// to treat const method calls as reads and non-const method calls as writes, 1817 /// and check that the appropriate locks are held. Non-const method calls with 1818 /// the same signature as const method calls can be also treated as reads. 1819 /// 1820 void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D, 1821 VarDecl *VD) { 1822 SourceLocation Loc = Exp->getExprLoc(); 1823 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd; 1824 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove; 1825 CapExprSet ScopedReqsAndExcludes; 1826 StringRef CapDiagKind = "mutex"; 1827 1828 // Figure out if we're constructing an object of scoped lockable class 1829 bool isScopedVar = false; 1830 if (VD) { 1831 if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) { 1832 const CXXRecordDecl* PD = CD->getParent(); 1833 if (PD && PD->hasAttr<ScopedLockableAttr>()) 1834 isScopedVar = true; 1835 } 1836 } 1837 1838 for(const Attr *At : D->attrs()) { 1839 switch (At->getKind()) { 1840 // When we encounter a lock function, we need to add the lock to our 1841 // lockset. 1842 case attr::AcquireCapability: { 1843 const auto *A = cast<AcquireCapabilityAttr>(At); 1844 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd 1845 : ExclusiveLocksToAdd, 1846 A, Exp, D, VD); 1847 1848 CapDiagKind = ClassifyDiagnostic(A); 1849 break; 1850 } 1851 1852 // An assert will add a lock to the lockset, but will not generate 1853 // a warning if it is already there, and will not generate a warning 1854 // if it is not removed. 1855 case attr::AssertExclusiveLock: { 1856 const auto *A = cast<AssertExclusiveLockAttr>(At); 1857 1858 CapExprSet AssertLocks; 1859 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD); 1860 for (const auto &AssertLock : AssertLocks) 1861 Analyzer->addLock( 1862 FSet, 1863 std::make_unique<LockableFactEntry>(AssertLock, LK_Exclusive, Loc, 1864 FactEntry::Asserted), 1865 ClassifyDiagnostic(A)); 1866 break; 1867 } 1868 case attr::AssertSharedLock: { 1869 const auto *A = cast<AssertSharedLockAttr>(At); 1870 1871 CapExprSet AssertLocks; 1872 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD); 1873 for (const auto &AssertLock : AssertLocks) 1874 Analyzer->addLock( 1875 FSet, 1876 std::make_unique<LockableFactEntry>(AssertLock, LK_Shared, Loc, 1877 FactEntry::Asserted), 1878 ClassifyDiagnostic(A)); 1879 break; 1880 } 1881 1882 case attr::AssertCapability: { 1883 const auto *A = cast<AssertCapabilityAttr>(At); 1884 CapExprSet AssertLocks; 1885 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD); 1886 for (const auto &AssertLock : AssertLocks) 1887 Analyzer->addLock(FSet, 1888 std::make_unique<LockableFactEntry>( 1889 AssertLock, 1890 A->isShared() ? LK_Shared : LK_Exclusive, Loc, 1891 FactEntry::Asserted), 1892 ClassifyDiagnostic(A)); 1893 break; 1894 } 1895 1896 // When we encounter an unlock function, we need to remove unlocked 1897 // mutexes from the lockset, and flag a warning if they are not there. 1898 case attr::ReleaseCapability: { 1899 const auto *A = cast<ReleaseCapabilityAttr>(At); 1900 if (A->isGeneric()) 1901 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD); 1902 else if (A->isShared()) 1903 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD); 1904 else 1905 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD); 1906 1907 CapDiagKind = ClassifyDiagnostic(A); 1908 break; 1909 } 1910 1911 case attr::RequiresCapability: { 1912 const auto *A = cast<RequiresCapabilityAttr>(At); 1913 for (auto *Arg : A->args()) { 1914 warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg, 1915 POK_FunctionCall, ClassifyDiagnostic(A), 1916 Exp->getExprLoc()); 1917 // use for adopting a lock 1918 if (isScopedVar) 1919 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD); 1920 } 1921 break; 1922 } 1923 1924 case attr::LocksExcluded: { 1925 const auto *A = cast<LocksExcludedAttr>(At); 1926 for (auto *Arg : A->args()) { 1927 warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A)); 1928 // use for deferring a lock 1929 if (isScopedVar) 1930 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD); 1931 } 1932 break; 1933 } 1934 1935 // Ignore attributes unrelated to thread-safety 1936 default: 1937 break; 1938 } 1939 } 1940 1941 // Remove locks first to allow lock upgrading/downgrading. 1942 // FIXME -- should only fully remove if the attribute refers to 'this'. 1943 bool Dtor = isa<CXXDestructorDecl>(D); 1944 for (const auto &M : ExclusiveLocksToRemove) 1945 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind); 1946 for (const auto &M : SharedLocksToRemove) 1947 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind); 1948 for (const auto &M : GenericLocksToRemove) 1949 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind); 1950 1951 // Add locks. 1952 FactEntry::SourceKind Source = 1953 isScopedVar ? FactEntry::Managed : FactEntry::Acquired; 1954 for (const auto &M : ExclusiveLocksToAdd) 1955 Analyzer->addLock( 1956 FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive, Loc, Source), 1957 CapDiagKind); 1958 for (const auto &M : SharedLocksToAdd) 1959 Analyzer->addLock( 1960 FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source), 1961 CapDiagKind); 1962 1963 if (isScopedVar) { 1964 // Add the managing object as a dummy mutex, mapped to the underlying mutex. 1965 SourceLocation MLoc = VD->getLocation(); 1966 DeclRefExpr DRE(VD->getASTContext(), VD, false, VD->getType(), VK_LValue, 1967 VD->getLocation()); 1968 // FIXME: does this store a pointer to DRE? 1969 CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr); 1970 1971 auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, MLoc); 1972 for (const auto &M : ExclusiveLocksToAdd) 1973 ScopedEntry->addLock(M); 1974 for (const auto &M : SharedLocksToAdd) 1975 ScopedEntry->addLock(M); 1976 for (const auto &M : ScopedReqsAndExcludes) 1977 ScopedEntry->addLock(M); 1978 for (const auto &M : ExclusiveLocksToRemove) 1979 ScopedEntry->addExclusiveUnlock(M); 1980 for (const auto &M : SharedLocksToRemove) 1981 ScopedEntry->addSharedUnlock(M); 1982 Analyzer->addLock(FSet, std::move(ScopedEntry), CapDiagKind); 1983 } 1984 } 1985 1986 /// For unary operations which read and write a variable, we need to 1987 /// check whether we hold any required mutexes. Reads are checked in 1988 /// VisitCastExpr. 1989 void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) { 1990 switch (UO->getOpcode()) { 1991 case UO_PostDec: 1992 case UO_PostInc: 1993 case UO_PreDec: 1994 case UO_PreInc: 1995 checkAccess(UO->getSubExpr(), AK_Written); 1996 break; 1997 default: 1998 break; 1999 } 2000 } 2001 2002 /// For binary operations which assign to a variable (writes), we need to check 2003 /// whether we hold any required mutexes. 2004 /// FIXME: Deal with non-primitive types. 2005 void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) { 2006 if (!BO->isAssignmentOp()) 2007 return; 2008 2009 // adjust the context 2010 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx); 2011 2012 checkAccess(BO->getLHS(), AK_Written); 2013 } 2014 2015 /// Whenever we do an LValue to Rvalue cast, we are reading a variable and 2016 /// need to ensure we hold any required mutexes. 2017 /// FIXME: Deal with non-primitive types. 2018 void BuildLockset::VisitCastExpr(const CastExpr *CE) { 2019 if (CE->getCastKind() != CK_LValueToRValue) 2020 return; 2021 checkAccess(CE->getSubExpr(), AK_Read); 2022 } 2023 2024 void BuildLockset::examineArguments(const FunctionDecl *FD, 2025 CallExpr::const_arg_iterator ArgBegin, 2026 CallExpr::const_arg_iterator ArgEnd, 2027 bool SkipFirstParam) { 2028 // Currently we can't do anything if we don't know the function declaration. 2029 if (!FD) 2030 return; 2031 2032 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it 2033 // only turns off checking within the body of a function, but we also 2034 // use it to turn off checking in arguments to the function. This 2035 // could result in some false negatives, but the alternative is to 2036 // create yet another attribute. 2037 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>()) 2038 return; 2039 2040 const ArrayRef<ParmVarDecl *> Params = FD->parameters(); 2041 auto Param = Params.begin(); 2042 if (SkipFirstParam) 2043 ++Param; 2044 2045 // There can be default arguments, so we stop when one iterator is at end(). 2046 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd; 2047 ++Param, ++Arg) { 2048 QualType Qt = (*Param)->getType(); 2049 if (Qt->isReferenceType()) 2050 checkAccess(*Arg, AK_Read, POK_PassByRef); 2051 } 2052 } 2053 2054 void BuildLockset::VisitCallExpr(const CallExpr *Exp) { 2055 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) { 2056 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee()); 2057 // ME can be null when calling a method pointer 2058 const CXXMethodDecl *MD = CE->getMethodDecl(); 2059 2060 if (ME && MD) { 2061 if (ME->isArrow()) { 2062 // Should perhaps be AK_Written if !MD->isConst(). 2063 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read); 2064 } else { 2065 // Should perhaps be AK_Written if !MD->isConst(). 2066 checkAccess(CE->getImplicitObjectArgument(), AK_Read); 2067 } 2068 } 2069 2070 examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end()); 2071 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) { 2072 auto OEop = OE->getOperator(); 2073 switch (OEop) { 2074 case OO_Equal: { 2075 const Expr *Target = OE->getArg(0); 2076 const Expr *Source = OE->getArg(1); 2077 checkAccess(Target, AK_Written); 2078 checkAccess(Source, AK_Read); 2079 break; 2080 } 2081 case OO_Star: 2082 case OO_Arrow: 2083 case OO_Subscript: 2084 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) { 2085 // Grrr. operator* can be multiplication... 2086 checkPtAccess(OE->getArg(0), AK_Read); 2087 } 2088 LLVM_FALLTHROUGH; 2089 default: { 2090 // TODO: get rid of this, and rely on pass-by-ref instead. 2091 const Expr *Obj = OE->getArg(0); 2092 checkAccess(Obj, AK_Read); 2093 // Check the remaining arguments. For method operators, the first 2094 // argument is the implicit self argument, and doesn't appear in the 2095 // FunctionDecl, but for non-methods it does. 2096 const FunctionDecl *FD = OE->getDirectCallee(); 2097 examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(), 2098 /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD)); 2099 break; 2100 } 2101 } 2102 } else { 2103 examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end()); 2104 } 2105 2106 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); 2107 if(!D || !D->hasAttrs()) 2108 return; 2109 handleCall(Exp, D); 2110 } 2111 2112 void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) { 2113 const CXXConstructorDecl *D = Exp->getConstructor(); 2114 if (D && D->isCopyConstructor()) { 2115 const Expr* Source = Exp->getArg(0); 2116 checkAccess(Source, AK_Read); 2117 } else { 2118 examineArguments(D, Exp->arg_begin(), Exp->arg_end()); 2119 } 2120 } 2121 2122 static CXXConstructorDecl * 2123 findConstructorForByValueReturn(const CXXRecordDecl *RD) { 2124 // Prefer a move constructor over a copy constructor. If there's more than 2125 // one copy constructor or more than one move constructor, we arbitrarily 2126 // pick the first declared such constructor rather than trying to guess which 2127 // one is more appropriate. 2128 CXXConstructorDecl *CopyCtor = nullptr; 2129 for (auto *Ctor : RD->ctors()) { 2130 if (Ctor->isDeleted()) 2131 continue; 2132 if (Ctor->isMoveConstructor()) 2133 return Ctor; 2134 if (!CopyCtor && Ctor->isCopyConstructor()) 2135 CopyCtor = Ctor; 2136 } 2137 return CopyCtor; 2138 } 2139 2140 static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args, 2141 SourceLocation Loc) { 2142 ASTContext &Ctx = CD->getASTContext(); 2143 return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc, 2144 CD, true, Args, false, false, false, false, 2145 CXXConstructExpr::CK_Complete, 2146 SourceRange(Loc, Loc)); 2147 } 2148 2149 void BuildLockset::VisitDeclStmt(const DeclStmt *S) { 2150 // adjust the context 2151 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx); 2152 2153 for (auto *D : S->getDeclGroup()) { 2154 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) { 2155 Expr *E = VD->getInit(); 2156 if (!E) 2157 continue; 2158 E = E->IgnoreParens(); 2159 2160 // handle constructors that involve temporaries 2161 if (auto *EWC = dyn_cast<ExprWithCleanups>(E)) 2162 E = EWC->getSubExpr()->IgnoreParens(); 2163 if (auto *CE = dyn_cast<CastExpr>(E)) 2164 if (CE->getCastKind() == CK_NoOp || 2165 CE->getCastKind() == CK_ConstructorConversion || 2166 CE->getCastKind() == CK_UserDefinedConversion) 2167 E = CE->getSubExpr()->IgnoreParens(); 2168 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E)) 2169 E = BTE->getSubExpr()->IgnoreParens(); 2170 2171 if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) { 2172 const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor()); 2173 if (!CtorD || !CtorD->hasAttrs()) 2174 continue; 2175 handleCall(E, CtorD, VD); 2176 } else if (isa<CallExpr>(E) && E->isPRValue()) { 2177 // If the object is initialized by a function call that returns a 2178 // scoped lockable by value, use the attributes on the copy or move 2179 // constructor to figure out what effect that should have on the 2180 // lockset. 2181 // FIXME: Is this really the best way to handle this situation? 2182 auto *RD = E->getType()->getAsCXXRecordDecl(); 2183 if (!RD || !RD->hasAttr<ScopedLockableAttr>()) 2184 continue; 2185 CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD); 2186 if (!CtorD || !CtorD->hasAttrs()) 2187 continue; 2188 handleCall(buildFakeCtorCall(CtorD, {E}, E->getBeginLoc()), CtorD, VD); 2189 } 2190 } 2191 } 2192 } 2193 2194 /// Given two facts merging on a join point, possibly warn and decide whether to 2195 /// keep or replace. 2196 /// 2197 /// \param CanModify Whether we can replace \p A by \p B. 2198 /// \return false if we should keep \p A, true if we should take \p B. 2199 bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B, 2200 bool CanModify) { 2201 if (A.kind() != B.kind()) { 2202 // For managed capabilities, the destructor should unlock in the right mode 2203 // anyway. For asserted capabilities no unlocking is needed. 2204 if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) { 2205 // The shared capability subsumes the exclusive capability, if possible. 2206 bool ShouldTakeB = B.kind() == LK_Shared; 2207 if (CanModify || !ShouldTakeB) 2208 return ShouldTakeB; 2209 } 2210 Handler.handleExclusiveAndShared("mutex", B.toString(), B.loc(), A.loc()); 2211 // Take the exclusive capability to reduce further warnings. 2212 return CanModify && B.kind() == LK_Exclusive; 2213 } else { 2214 // The non-asserted capability is the one we want to track. 2215 return CanModify && A.asserted() && !B.asserted(); 2216 } 2217 } 2218 2219 /// Compute the intersection of two locksets and issue warnings for any 2220 /// locks in the symmetric difference. 2221 /// 2222 /// This function is used at a merge point in the CFG when comparing the lockset 2223 /// of each branch being merged. For example, given the following sequence: 2224 /// A; if () then B; else C; D; we need to check that the lockset after B and C 2225 /// are the same. In the event of a difference, we use the intersection of these 2226 /// two locksets at the start of D. 2227 /// 2228 /// \param EntrySet A lockset for entry into a (possibly new) block. 2229 /// \param ExitSet The lockset on exiting a preceding block. 2230 /// \param JoinLoc The location of the join point for error reporting 2231 /// \param EntryLEK The warning if a mutex is missing from \p EntrySet. 2232 /// \param ExitLEK The warning if a mutex is missing from \p ExitSet. 2233 void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet, 2234 const FactSet &ExitSet, 2235 SourceLocation JoinLoc, 2236 LockErrorKind EntryLEK, 2237 LockErrorKind ExitLEK) { 2238 FactSet EntrySetOrig = EntrySet; 2239 2240 // Find locks in ExitSet that conflict or are not in EntrySet, and warn. 2241 for (const auto &Fact : ExitSet) { 2242 const FactEntry &ExitFact = FactMan[Fact]; 2243 2244 FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact); 2245 if (EntryIt != EntrySet.end()) { 2246 if (join(FactMan[*EntryIt], ExitFact, 2247 EntryLEK != LEK_LockedSomeLoopIterations)) 2248 *EntryIt = Fact; 2249 } else if (!ExitFact.managed()) { 2250 ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc, 2251 EntryLEK, Handler); 2252 } 2253 } 2254 2255 // Find locks in EntrySet that are not in ExitSet, and remove them. 2256 for (const auto &Fact : EntrySetOrig) { 2257 const FactEntry *EntryFact = &FactMan[Fact]; 2258 const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact); 2259 2260 if (!ExitFact) { 2261 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations) 2262 EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc, 2263 ExitLEK, Handler); 2264 if (ExitLEK == LEK_LockedSomePredecessors) 2265 EntrySet.removeLock(FactMan, *EntryFact); 2266 } 2267 } 2268 } 2269 2270 // Return true if block B never continues to its successors. 2271 static bool neverReturns(const CFGBlock *B) { 2272 if (B->hasNoReturnElement()) 2273 return true; 2274 if (B->empty()) 2275 return false; 2276 2277 CFGElement Last = B->back(); 2278 if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) { 2279 if (isa<CXXThrowExpr>(S->getStmt())) 2280 return true; 2281 } 2282 return false; 2283 } 2284 2285 /// Check a function's CFG for thread-safety violations. 2286 /// 2287 /// We traverse the blocks in the CFG, compute the set of mutexes that are held 2288 /// at the end of each block, and issue warnings for thread safety violations. 2289 /// Each block in the CFG is traversed exactly once. 2290 void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { 2291 // TODO: this whole function needs be rewritten as a visitor for CFGWalker. 2292 // For now, we just use the walker to set things up. 2293 threadSafety::CFGWalker walker; 2294 if (!walker.init(AC)) 2295 return; 2296 2297 // AC.dumpCFG(true); 2298 // threadSafety::printSCFG(walker); 2299 2300 CFG *CFGraph = walker.getGraph(); 2301 const NamedDecl *D = walker.getDecl(); 2302 const auto *CurrentFunction = dyn_cast<FunctionDecl>(D); 2303 CurrentMethod = dyn_cast<CXXMethodDecl>(D); 2304 2305 if (D->hasAttr<NoThreadSafetyAnalysisAttr>()) 2306 return; 2307 2308 // FIXME: Do something a bit more intelligent inside constructor and 2309 // destructor code. Constructors and destructors must assume unique access 2310 // to 'this', so checks on member variable access is disabled, but we should 2311 // still enable checks on other objects. 2312 if (isa<CXXConstructorDecl>(D)) 2313 return; // Don't check inside constructors. 2314 if (isa<CXXDestructorDecl>(D)) 2315 return; // Don't check inside destructors. 2316 2317 Handler.enterFunction(CurrentFunction); 2318 2319 BlockInfo.resize(CFGraph->getNumBlockIDs(), 2320 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap)); 2321 2322 // We need to explore the CFG via a "topological" ordering. 2323 // That way, we will be guaranteed to have information about required 2324 // predecessor locksets when exploring a new block. 2325 const PostOrderCFGView *SortedGraph = walker.getSortedGraph(); 2326 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph); 2327 2328 // Mark entry block as reachable 2329 BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true; 2330 2331 // Compute SSA names for local variables 2332 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo); 2333 2334 // Fill in source locations for all CFGBlocks. 2335 findBlockLocations(CFGraph, SortedGraph, BlockInfo); 2336 2337 CapExprSet ExclusiveLocksAcquired; 2338 CapExprSet SharedLocksAcquired; 2339 CapExprSet LocksReleased; 2340 2341 // Add locks from exclusive_locks_required and shared_locks_required 2342 // to initial lockset. Also turn off checking for lock and unlock functions. 2343 // FIXME: is there a more intelligent way to check lock/unlock functions? 2344 if (!SortedGraph->empty() && D->hasAttrs()) { 2345 const CFGBlock *FirstBlock = *SortedGraph->begin(); 2346 FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet; 2347 2348 CapExprSet ExclusiveLocksToAdd; 2349 CapExprSet SharedLocksToAdd; 2350 StringRef CapDiagKind = "mutex"; 2351 2352 SourceLocation Loc = D->getLocation(); 2353 for (const auto *Attr : D->attrs()) { 2354 Loc = Attr->getLocation(); 2355 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) { 2356 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 2357 nullptr, D); 2358 CapDiagKind = ClassifyDiagnostic(A); 2359 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) { 2360 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation. 2361 // We must ignore such methods. 2362 if (A->args_size() == 0) 2363 return; 2364 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 2365 nullptr, D); 2366 getMutexIDs(LocksReleased, A, nullptr, D); 2367 CapDiagKind = ClassifyDiagnostic(A); 2368 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) { 2369 if (A->args_size() == 0) 2370 return; 2371 getMutexIDs(A->isShared() ? SharedLocksAcquired 2372 : ExclusiveLocksAcquired, 2373 A, nullptr, D); 2374 CapDiagKind = ClassifyDiagnostic(A); 2375 } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) { 2376 // Don't try to check trylock functions for now. 2377 return; 2378 } else if (isa<SharedTrylockFunctionAttr>(Attr)) { 2379 // Don't try to check trylock functions for now. 2380 return; 2381 } else if (isa<TryAcquireCapabilityAttr>(Attr)) { 2382 // Don't try to check trylock functions for now. 2383 return; 2384 } 2385 } 2386 2387 // FIXME -- Loc can be wrong here. 2388 for (const auto &Mu : ExclusiveLocksToAdd) { 2389 auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc, 2390 FactEntry::Declared); 2391 addLock(InitialLockset, std::move(Entry), CapDiagKind, true); 2392 } 2393 for (const auto &Mu : SharedLocksToAdd) { 2394 auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc, 2395 FactEntry::Declared); 2396 addLock(InitialLockset, std::move(Entry), CapDiagKind, true); 2397 } 2398 } 2399 2400 for (const auto *CurrBlock : *SortedGraph) { 2401 unsigned CurrBlockID = CurrBlock->getBlockID(); 2402 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID]; 2403 2404 // Use the default initial lockset in case there are no predecessors. 2405 VisitedBlocks.insert(CurrBlock); 2406 2407 // Iterate through the predecessor blocks and warn if the lockset for all 2408 // predecessors is not the same. We take the entry lockset of the current 2409 // block to be the intersection of all previous locksets. 2410 // FIXME: By keeping the intersection, we may output more errors in future 2411 // for a lock which is not in the intersection, but was in the union. We 2412 // may want to also keep the union in future. As an example, let's say 2413 // the intersection contains Mutex L, and the union contains L and M. 2414 // Later we unlock M. At this point, we would output an error because we 2415 // never locked M; although the real error is probably that we forgot to 2416 // lock M on all code paths. Conversely, let's say that later we lock M. 2417 // In this case, we should compare against the intersection instead of the 2418 // union because the real error is probably that we forgot to unlock M on 2419 // all code paths. 2420 bool LocksetInitialized = false; 2421 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), 2422 PE = CurrBlock->pred_end(); PI != PE; ++PI) { 2423 // if *PI -> CurrBlock is a back edge 2424 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) 2425 continue; 2426 2427 unsigned PrevBlockID = (*PI)->getBlockID(); 2428 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; 2429 2430 // Ignore edges from blocks that can't return. 2431 if (neverReturns(*PI) || !PrevBlockInfo->Reachable) 2432 continue; 2433 2434 // Okay, we can reach this block from the entry. 2435 CurrBlockInfo->Reachable = true; 2436 2437 FactSet PrevLockset; 2438 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock); 2439 2440 if (!LocksetInitialized) { 2441 CurrBlockInfo->EntrySet = PrevLockset; 2442 LocksetInitialized = true; 2443 } else { 2444 // Surprisingly 'continue' doesn't always produce back edges, because 2445 // the CFG has empty "transition" blocks where they meet with the end 2446 // of the regular loop body. We still want to diagnose them as loop. 2447 intersectAndWarn( 2448 CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc, 2449 isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt()) 2450 ? LEK_LockedSomeLoopIterations 2451 : LEK_LockedSomePredecessors); 2452 } 2453 } 2454 2455 // Skip rest of block if it's not reachable. 2456 if (!CurrBlockInfo->Reachable) 2457 continue; 2458 2459 BuildLockset LocksetBuilder(this, *CurrBlockInfo); 2460 2461 // Visit all the statements in the basic block. 2462 for (const auto &BI : *CurrBlock) { 2463 switch (BI.getKind()) { 2464 case CFGElement::Statement: { 2465 CFGStmt CS = BI.castAs<CFGStmt>(); 2466 LocksetBuilder.Visit(CS.getStmt()); 2467 break; 2468 } 2469 // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now. 2470 case CFGElement::AutomaticObjectDtor: { 2471 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>(); 2472 const auto *DD = AD.getDestructorDecl(AC.getASTContext()); 2473 if (!DD->hasAttrs()) 2474 break; 2475 2476 // Create a dummy expression, 2477 auto *VD = const_cast<VarDecl *>(AD.getVarDecl()); 2478 DeclRefExpr DRE(VD->getASTContext(), VD, false, 2479 VD->getType().getNonReferenceType(), VK_LValue, 2480 AD.getTriggerStmt()->getEndLoc()); 2481 LocksetBuilder.handleCall(&DRE, DD); 2482 break; 2483 } 2484 default: 2485 break; 2486 } 2487 } 2488 CurrBlockInfo->ExitSet = LocksetBuilder.FSet; 2489 2490 // For every back edge from CurrBlock (the end of the loop) to another block 2491 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to 2492 // the one held at the beginning of FirstLoopBlock. We can look up the 2493 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map. 2494 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), 2495 SE = CurrBlock->succ_end(); SI != SE; ++SI) { 2496 // if CurrBlock -> *SI is *not* a back edge 2497 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI)) 2498 continue; 2499 2500 CFGBlock *FirstLoopBlock = *SI; 2501 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()]; 2502 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID]; 2503 intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc, 2504 LEK_LockedSomeLoopIterations); 2505 } 2506 } 2507 2508 CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()]; 2509 CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()]; 2510 2511 // Skip the final check if the exit block is unreachable. 2512 if (!Final->Reachable) 2513 return; 2514 2515 // By default, we expect all locks held on entry to be held on exit. 2516 FactSet ExpectedExitSet = Initial->EntrySet; 2517 2518 // Adjust the expected exit set by adding or removing locks, as declared 2519 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then 2520 // issue the appropriate warning. 2521 // FIXME: the location here is not quite right. 2522 for (const auto &Lock : ExclusiveLocksAcquired) 2523 ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>( 2524 Lock, LK_Exclusive, D->getLocation())); 2525 for (const auto &Lock : SharedLocksAcquired) 2526 ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>( 2527 Lock, LK_Shared, D->getLocation())); 2528 for (const auto &Lock : LocksReleased) 2529 ExpectedExitSet.removeLock(FactMan, Lock); 2530 2531 // FIXME: Should we call this function for all blocks which exit the function? 2532 intersectAndWarn(ExpectedExitSet, Final->ExitSet, Final->ExitLoc, 2533 LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction); 2534 2535 Handler.leaveFunction(CurrentFunction); 2536 } 2537 2538 /// Check a function's CFG for thread-safety violations. 2539 /// 2540 /// We traverse the blocks in the CFG, compute the set of mutexes that are held 2541 /// at the end of each block, and issue warnings for thread safety violations. 2542 /// Each block in the CFG is traversed exactly once. 2543 void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC, 2544 ThreadSafetyHandler &Handler, 2545 BeforeSet **BSet) { 2546 if (!*BSet) 2547 *BSet = new BeforeSet; 2548 ThreadSafetyAnalyzer Analyzer(Handler, *BSet); 2549 Analyzer.runAnalysis(AC); 2550 } 2551 2552 void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; } 2553 2554 /// Helper function that returns a LockKind required for the given level 2555 /// of access. 2556 LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) { 2557 switch (AK) { 2558 case AK_Read : 2559 return LK_Shared; 2560 case AK_Written : 2561 return LK_Exclusive; 2562 } 2563 llvm_unreachable("Unknown AccessKind"); 2564 } 2565