1 //===- ThreadSafety.cpp ---------------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // A intra-procedural analysis for thread safety (e.g. deadlocks and race 10 // conditions), based off of an annotation system. 11 // 12 // See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html 13 // for more information. 14 // 15 //===----------------------------------------------------------------------===// 16 17 #include "clang/Analysis/Analyses/ThreadSafety.h" 18 #include "clang/AST/Attr.h" 19 #include "clang/AST/Decl.h" 20 #include "clang/AST/DeclCXX.h" 21 #include "clang/AST/DeclGroup.h" 22 #include "clang/AST/Expr.h" 23 #include "clang/AST/ExprCXX.h" 24 #include "clang/AST/OperationKinds.h" 25 #include "clang/AST/Stmt.h" 26 #include "clang/AST/StmtVisitor.h" 27 #include "clang/AST/Type.h" 28 #include "clang/Analysis/Analyses/PostOrderCFGView.h" 29 #include "clang/Analysis/Analyses/ThreadSafetyCommon.h" 30 #include "clang/Analysis/Analyses/ThreadSafetyTIL.h" 31 #include "clang/Analysis/Analyses/ThreadSafetyTraverse.h" 32 #include "clang/Analysis/Analyses/ThreadSafetyUtil.h" 33 #include "clang/Analysis/AnalysisDeclContext.h" 34 #include "clang/Analysis/CFG.h" 35 #include "clang/Basic/Builtins.h" 36 #include "clang/Basic/LLVM.h" 37 #include "clang/Basic/OperatorKinds.h" 38 #include "clang/Basic/SourceLocation.h" 39 #include "clang/Basic/Specifiers.h" 40 #include "llvm/ADT/ArrayRef.h" 41 #include "llvm/ADT/DenseMap.h" 42 #include "llvm/ADT/ImmutableMap.h" 43 #include "llvm/ADT/STLExtras.h" 44 #include "llvm/ADT/SmallVector.h" 45 #include "llvm/ADT/StringRef.h" 46 #include "llvm/Support/Allocator.h" 47 #include "llvm/Support/Casting.h" 48 #include "llvm/Support/ErrorHandling.h" 49 #include "llvm/Support/raw_ostream.h" 50 #include <algorithm> 51 #include <cassert> 52 #include <functional> 53 #include <iterator> 54 #include <memory> 55 #include <optional> 56 #include <string> 57 #include <type_traits> 58 #include <utility> 59 #include <vector> 60 61 using namespace clang; 62 using namespace threadSafety; 63 64 // Key method definition 65 ThreadSafetyHandler::~ThreadSafetyHandler() = default; 66 67 /// Issue a warning about an invalid lock expression 68 static void warnInvalidLock(ThreadSafetyHandler &Handler, 69 const Expr *MutexExp, const NamedDecl *D, 70 const Expr *DeclExp, StringRef Kind) { 71 SourceLocation Loc; 72 if (DeclExp) 73 Loc = DeclExp->getExprLoc(); 74 75 // FIXME: add a note about the attribute location in MutexExp or D 76 if (Loc.isValid()) 77 Handler.handleInvalidLockExp(Loc); 78 } 79 80 namespace { 81 82 /// A set of CapabilityExpr objects, which are compiled from thread safety 83 /// attributes on a function. 84 class CapExprSet : public SmallVector<CapabilityExpr, 4> { 85 public: 86 /// Push M onto list, but discard duplicates. 87 void push_back_nodup(const CapabilityExpr &CapE) { 88 if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) { 89 return CapE.equals(CapE2); 90 })) 91 push_back(CapE); 92 } 93 }; 94 95 class FactManager; 96 class FactSet; 97 98 /// This is a helper class that stores a fact that is known at a 99 /// particular point in program execution. Currently, a fact is a capability, 100 /// along with additional information, such as where it was acquired, whether 101 /// it is exclusive or shared, etc. 102 /// 103 /// FIXME: this analysis does not currently support re-entrant locking. 104 class FactEntry : public CapabilityExpr { 105 public: 106 enum FactEntryKind { Lockable, ScopedLockable }; 107 108 /// Where a fact comes from. 109 enum SourceKind { 110 Acquired, ///< The fact has been directly acquired. 111 Asserted, ///< The fact has been asserted to be held. 112 Declared, ///< The fact is assumed to be held by callers. 113 Managed, ///< The fact has been acquired through a scoped capability. 114 }; 115 116 private: 117 const FactEntryKind Kind : 8; 118 119 /// Exclusive or shared. 120 LockKind LKind : 8; 121 122 // How it was acquired. 123 SourceKind Source : 8; 124 125 /// Where it was acquired. 126 SourceLocation AcquireLoc; 127 128 public: 129 FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK, 130 SourceLocation Loc, SourceKind Src) 131 : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {} 132 virtual ~FactEntry() = default; 133 134 LockKind kind() const { return LKind; } 135 SourceLocation loc() const { return AcquireLoc; } 136 FactEntryKind getFactEntryKind() const { return Kind; } 137 138 bool asserted() const { return Source == Asserted; } 139 bool declared() const { return Source == Declared; } 140 bool managed() const { return Source == Managed; } 141 142 virtual void 143 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 144 SourceLocation JoinLoc, LockErrorKind LEK, 145 ThreadSafetyHandler &Handler) const = 0; 146 virtual void handleLock(FactSet &FSet, FactManager &FactMan, 147 const FactEntry &entry, 148 ThreadSafetyHandler &Handler) const = 0; 149 virtual void handleUnlock(FactSet &FSet, FactManager &FactMan, 150 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 151 bool FullyRemove, 152 ThreadSafetyHandler &Handler) const = 0; 153 154 // Return true if LKind >= LK, where exclusive > shared 155 bool isAtLeast(LockKind LK) const { 156 return (LKind == LK_Exclusive) || (LK == LK_Shared); 157 } 158 }; 159 160 using FactID = unsigned short; 161 162 /// FactManager manages the memory for all facts that are created during 163 /// the analysis of a single routine. 164 class FactManager { 165 private: 166 std::vector<std::unique_ptr<const FactEntry>> Facts; 167 168 public: 169 FactID newFact(std::unique_ptr<FactEntry> Entry) { 170 Facts.push_back(std::move(Entry)); 171 return static_cast<unsigned short>(Facts.size() - 1); 172 } 173 174 const FactEntry &operator[](FactID F) const { return *Facts[F]; } 175 }; 176 177 /// A FactSet is the set of facts that are known to be true at a 178 /// particular program point. FactSets must be small, because they are 179 /// frequently copied, and are thus implemented as a set of indices into a 180 /// table maintained by a FactManager. A typical FactSet only holds 1 or 2 181 /// locks, so we can get away with doing a linear search for lookup. Note 182 /// that a hashtable or map is inappropriate in this case, because lookups 183 /// may involve partial pattern matches, rather than exact matches. 184 class FactSet { 185 private: 186 using FactVec = SmallVector<FactID, 4>; 187 188 FactVec FactIDs; 189 190 public: 191 using iterator = FactVec::iterator; 192 using const_iterator = FactVec::const_iterator; 193 194 iterator begin() { return FactIDs.begin(); } 195 const_iterator begin() const { return FactIDs.begin(); } 196 197 iterator end() { return FactIDs.end(); } 198 const_iterator end() const { return FactIDs.end(); } 199 200 bool isEmpty() const { return FactIDs.size() == 0; } 201 202 // Return true if the set contains only negative facts 203 bool isEmpty(FactManager &FactMan) const { 204 for (const auto FID : *this) { 205 if (!FactMan[FID].negative()) 206 return false; 207 } 208 return true; 209 } 210 211 void addLockByID(FactID ID) { FactIDs.push_back(ID); } 212 213 FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) { 214 FactID F = FM.newFact(std::move(Entry)); 215 FactIDs.push_back(F); 216 return F; 217 } 218 219 bool removeLock(FactManager& FM, const CapabilityExpr &CapE) { 220 unsigned n = FactIDs.size(); 221 if (n == 0) 222 return false; 223 224 for (unsigned i = 0; i < n-1; ++i) { 225 if (FM[FactIDs[i]].matches(CapE)) { 226 FactIDs[i] = FactIDs[n-1]; 227 FactIDs.pop_back(); 228 return true; 229 } 230 } 231 if (FM[FactIDs[n-1]].matches(CapE)) { 232 FactIDs.pop_back(); 233 return true; 234 } 235 return false; 236 } 237 238 iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) { 239 return std::find_if(begin(), end(), [&](FactID ID) { 240 return FM[ID].matches(CapE); 241 }); 242 } 243 244 const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const { 245 auto I = std::find_if(begin(), end(), [&](FactID ID) { 246 return FM[ID].matches(CapE); 247 }); 248 return I != end() ? &FM[*I] : nullptr; 249 } 250 251 const FactEntry *findLockUniv(FactManager &FM, 252 const CapabilityExpr &CapE) const { 253 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 254 return FM[ID].matchesUniv(CapE); 255 }); 256 return I != end() ? &FM[*I] : nullptr; 257 } 258 259 const FactEntry *findPartialMatch(FactManager &FM, 260 const CapabilityExpr &CapE) const { 261 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 262 return FM[ID].partiallyMatches(CapE); 263 }); 264 return I != end() ? &FM[*I] : nullptr; 265 } 266 267 bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const { 268 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { 269 return FM[ID].valueDecl() == Vd; 270 }); 271 return I != end(); 272 } 273 }; 274 275 class ThreadSafetyAnalyzer; 276 277 } // namespace 278 279 namespace clang { 280 namespace threadSafety { 281 282 class BeforeSet { 283 private: 284 using BeforeVect = SmallVector<const ValueDecl *, 4>; 285 286 struct BeforeInfo { 287 BeforeVect Vect; 288 int Visited = 0; 289 290 BeforeInfo() = default; 291 BeforeInfo(BeforeInfo &&) = default; 292 }; 293 294 using BeforeMap = 295 llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>; 296 using CycleMap = llvm::DenseMap<const ValueDecl *, bool>; 297 298 public: 299 BeforeSet() = default; 300 301 BeforeInfo* insertAttrExprs(const ValueDecl* Vd, 302 ThreadSafetyAnalyzer& Analyzer); 303 304 BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd, 305 ThreadSafetyAnalyzer &Analyzer); 306 307 void checkBeforeAfter(const ValueDecl* Vd, 308 const FactSet& FSet, 309 ThreadSafetyAnalyzer& Analyzer, 310 SourceLocation Loc, StringRef CapKind); 311 312 private: 313 BeforeMap BMap; 314 CycleMap CycMap; 315 }; 316 317 } // namespace threadSafety 318 } // namespace clang 319 320 namespace { 321 322 class LocalVariableMap; 323 324 using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>; 325 326 /// A side (entry or exit) of a CFG node. 327 enum CFGBlockSide { CBS_Entry, CBS_Exit }; 328 329 /// CFGBlockInfo is a struct which contains all the information that is 330 /// maintained for each block in the CFG. See LocalVariableMap for more 331 /// information about the contexts. 332 struct CFGBlockInfo { 333 // Lockset held at entry to block 334 FactSet EntrySet; 335 336 // Lockset held at exit from block 337 FactSet ExitSet; 338 339 // Context held at entry to block 340 LocalVarContext EntryContext; 341 342 // Context held at exit from block 343 LocalVarContext ExitContext; 344 345 // Location of first statement in block 346 SourceLocation EntryLoc; 347 348 // Location of last statement in block. 349 SourceLocation ExitLoc; 350 351 // Used to replay contexts later 352 unsigned EntryIndex; 353 354 // Is this block reachable? 355 bool Reachable = false; 356 357 const FactSet &getSet(CFGBlockSide Side) const { 358 return Side == CBS_Entry ? EntrySet : ExitSet; 359 } 360 361 SourceLocation getLocation(CFGBlockSide Side) const { 362 return Side == CBS_Entry ? EntryLoc : ExitLoc; 363 } 364 365 private: 366 CFGBlockInfo(LocalVarContext EmptyCtx) 367 : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {} 368 369 public: 370 static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M); 371 }; 372 373 // A LocalVariableMap maintains a map from local variables to their currently 374 // valid definitions. It provides SSA-like functionality when traversing the 375 // CFG. Like SSA, each definition or assignment to a variable is assigned a 376 // unique name (an integer), which acts as the SSA name for that definition. 377 // The total set of names is shared among all CFG basic blocks. 378 // Unlike SSA, we do not rewrite expressions to replace local variables declrefs 379 // with their SSA-names. Instead, we compute a Context for each point in the 380 // code, which maps local variables to the appropriate SSA-name. This map 381 // changes with each assignment. 382 // 383 // The map is computed in a single pass over the CFG. Subsequent analyses can 384 // then query the map to find the appropriate Context for a statement, and use 385 // that Context to look up the definitions of variables. 386 class LocalVariableMap { 387 public: 388 using Context = LocalVarContext; 389 390 /// A VarDefinition consists of an expression, representing the value of the 391 /// variable, along with the context in which that expression should be 392 /// interpreted. A reference VarDefinition does not itself contain this 393 /// information, but instead contains a pointer to a previous VarDefinition. 394 struct VarDefinition { 395 public: 396 friend class LocalVariableMap; 397 398 // The original declaration for this variable. 399 const NamedDecl *Dec; 400 401 // The expression for this variable, OR 402 const Expr *Exp = nullptr; 403 404 // Reference to another VarDefinition 405 unsigned Ref = 0; 406 407 // The map with which Exp should be interpreted. 408 Context Ctx; 409 410 bool isReference() const { return !Exp; } 411 412 private: 413 // Create ordinary variable definition 414 VarDefinition(const NamedDecl *D, const Expr *E, Context C) 415 : Dec(D), Exp(E), Ctx(C) {} 416 417 // Create reference to previous definition 418 VarDefinition(const NamedDecl *D, unsigned R, Context C) 419 : Dec(D), Ref(R), Ctx(C) {} 420 }; 421 422 private: 423 Context::Factory ContextFactory; 424 std::vector<VarDefinition> VarDefinitions; 425 std::vector<std::pair<const Stmt *, Context>> SavedContexts; 426 427 public: 428 LocalVariableMap() { 429 // index 0 is a placeholder for undefined variables (aka phi-nodes). 430 VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext())); 431 } 432 433 /// Look up a definition, within the given context. 434 const VarDefinition* lookup(const NamedDecl *D, Context Ctx) { 435 const unsigned *i = Ctx.lookup(D); 436 if (!i) 437 return nullptr; 438 assert(*i < VarDefinitions.size()); 439 return &VarDefinitions[*i]; 440 } 441 442 /// Look up the definition for D within the given context. Returns 443 /// NULL if the expression is not statically known. If successful, also 444 /// modifies Ctx to hold the context of the return Expr. 445 const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) { 446 const unsigned *P = Ctx.lookup(D); 447 if (!P) 448 return nullptr; 449 450 unsigned i = *P; 451 while (i > 0) { 452 if (VarDefinitions[i].Exp) { 453 Ctx = VarDefinitions[i].Ctx; 454 return VarDefinitions[i].Exp; 455 } 456 i = VarDefinitions[i].Ref; 457 } 458 return nullptr; 459 } 460 461 Context getEmptyContext() { return ContextFactory.getEmptyMap(); } 462 463 /// Return the next context after processing S. This function is used by 464 /// clients of the class to get the appropriate context when traversing the 465 /// CFG. It must be called for every assignment or DeclStmt. 466 Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) { 467 if (SavedContexts[CtxIndex+1].first == S) { 468 CtxIndex++; 469 Context Result = SavedContexts[CtxIndex].second; 470 return Result; 471 } 472 return C; 473 } 474 475 void dumpVarDefinitionName(unsigned i) { 476 if (i == 0) { 477 llvm::errs() << "Undefined"; 478 return; 479 } 480 const NamedDecl *Dec = VarDefinitions[i].Dec; 481 if (!Dec) { 482 llvm::errs() << "<<NULL>>"; 483 return; 484 } 485 Dec->printName(llvm::errs()); 486 llvm::errs() << "." << i << " " << ((const void*) Dec); 487 } 488 489 /// Dumps an ASCII representation of the variable map to llvm::errs() 490 void dump() { 491 for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) { 492 const Expr *Exp = VarDefinitions[i].Exp; 493 unsigned Ref = VarDefinitions[i].Ref; 494 495 dumpVarDefinitionName(i); 496 llvm::errs() << " = "; 497 if (Exp) Exp->dump(); 498 else { 499 dumpVarDefinitionName(Ref); 500 llvm::errs() << "\n"; 501 } 502 } 503 } 504 505 /// Dumps an ASCII representation of a Context to llvm::errs() 506 void dumpContext(Context C) { 507 for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) { 508 const NamedDecl *D = I.getKey(); 509 D->printName(llvm::errs()); 510 llvm::errs() << " -> "; 511 dumpVarDefinitionName(I.getData()); 512 llvm::errs() << "\n"; 513 } 514 } 515 516 /// Builds the variable map. 517 void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph, 518 std::vector<CFGBlockInfo> &BlockInfo); 519 520 protected: 521 friend class VarMapBuilder; 522 523 // Get the current context index 524 unsigned getContextIndex() { return SavedContexts.size()-1; } 525 526 // Save the current context for later replay 527 void saveContext(const Stmt *S, Context C) { 528 SavedContexts.push_back(std::make_pair(S, C)); 529 } 530 531 // Adds a new definition to the given context, and returns a new context. 532 // This method should be called when declaring a new variable. 533 Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) { 534 assert(!Ctx.contains(D)); 535 unsigned newID = VarDefinitions.size(); 536 Context NewCtx = ContextFactory.add(Ctx, D, newID); 537 VarDefinitions.push_back(VarDefinition(D, Exp, Ctx)); 538 return NewCtx; 539 } 540 541 // Add a new reference to an existing definition. 542 Context addReference(const NamedDecl *D, unsigned i, Context Ctx) { 543 unsigned newID = VarDefinitions.size(); 544 Context NewCtx = ContextFactory.add(Ctx, D, newID); 545 VarDefinitions.push_back(VarDefinition(D, i, Ctx)); 546 return NewCtx; 547 } 548 549 // Updates a definition only if that definition is already in the map. 550 // This method should be called when assigning to an existing variable. 551 Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) { 552 if (Ctx.contains(D)) { 553 unsigned newID = VarDefinitions.size(); 554 Context NewCtx = ContextFactory.remove(Ctx, D); 555 NewCtx = ContextFactory.add(NewCtx, D, newID); 556 VarDefinitions.push_back(VarDefinition(D, Exp, Ctx)); 557 return NewCtx; 558 } 559 return Ctx; 560 } 561 562 // Removes a definition from the context, but keeps the variable name 563 // as a valid variable. The index 0 is a placeholder for cleared definitions. 564 Context clearDefinition(const NamedDecl *D, Context Ctx) { 565 Context NewCtx = Ctx; 566 if (NewCtx.contains(D)) { 567 NewCtx = ContextFactory.remove(NewCtx, D); 568 NewCtx = ContextFactory.add(NewCtx, D, 0); 569 } 570 return NewCtx; 571 } 572 573 // Remove a definition entirely frmo the context. 574 Context removeDefinition(const NamedDecl *D, Context Ctx) { 575 Context NewCtx = Ctx; 576 if (NewCtx.contains(D)) { 577 NewCtx = ContextFactory.remove(NewCtx, D); 578 } 579 return NewCtx; 580 } 581 582 Context intersectContexts(Context C1, Context C2); 583 Context createReferenceContext(Context C); 584 void intersectBackEdge(Context C1, Context C2); 585 }; 586 587 } // namespace 588 589 // This has to be defined after LocalVariableMap. 590 CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) { 591 return CFGBlockInfo(M.getEmptyContext()); 592 } 593 594 namespace { 595 596 /// Visitor which builds a LocalVariableMap 597 class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> { 598 public: 599 LocalVariableMap* VMap; 600 LocalVariableMap::Context Ctx; 601 602 VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C) 603 : VMap(VM), Ctx(C) {} 604 605 void VisitDeclStmt(const DeclStmt *S); 606 void VisitBinaryOperator(const BinaryOperator *BO); 607 }; 608 609 } // namespace 610 611 // Add new local variables to the variable map 612 void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) { 613 bool modifiedCtx = false; 614 const DeclGroupRef DGrp = S->getDeclGroup(); 615 for (const auto *D : DGrp) { 616 if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) { 617 const Expr *E = VD->getInit(); 618 619 // Add local variables with trivial type to the variable map 620 QualType T = VD->getType(); 621 if (T.isTrivialType(VD->getASTContext())) { 622 Ctx = VMap->addDefinition(VD, E, Ctx); 623 modifiedCtx = true; 624 } 625 } 626 } 627 if (modifiedCtx) 628 VMap->saveContext(S, Ctx); 629 } 630 631 // Update local variable definitions in variable map 632 void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) { 633 if (!BO->isAssignmentOp()) 634 return; 635 636 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts(); 637 638 // Update the variable map and current context. 639 if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) { 640 const ValueDecl *VDec = DRE->getDecl(); 641 if (Ctx.lookup(VDec)) { 642 if (BO->getOpcode() == BO_Assign) 643 Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx); 644 else 645 // FIXME -- handle compound assignment operators 646 Ctx = VMap->clearDefinition(VDec, Ctx); 647 VMap->saveContext(BO, Ctx); 648 } 649 } 650 } 651 652 // Computes the intersection of two contexts. The intersection is the 653 // set of variables which have the same definition in both contexts; 654 // variables with different definitions are discarded. 655 LocalVariableMap::Context 656 LocalVariableMap::intersectContexts(Context C1, Context C2) { 657 Context Result = C1; 658 for (const auto &P : C1) { 659 const NamedDecl *Dec = P.first; 660 const unsigned *i2 = C2.lookup(Dec); 661 if (!i2) // variable doesn't exist on second path 662 Result = removeDefinition(Dec, Result); 663 else if (*i2 != P.second) // variable exists, but has different definition 664 Result = clearDefinition(Dec, Result); 665 } 666 return Result; 667 } 668 669 // For every variable in C, create a new variable that refers to the 670 // definition in C. Return a new context that contains these new variables. 671 // (We use this for a naive implementation of SSA on loop back-edges.) 672 LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) { 673 Context Result = getEmptyContext(); 674 for (const auto &P : C) 675 Result = addReference(P.first, P.second, Result); 676 return Result; 677 } 678 679 // This routine also takes the intersection of C1 and C2, but it does so by 680 // altering the VarDefinitions. C1 must be the result of an earlier call to 681 // createReferenceContext. 682 void LocalVariableMap::intersectBackEdge(Context C1, Context C2) { 683 for (const auto &P : C1) { 684 unsigned i1 = P.second; 685 VarDefinition *VDef = &VarDefinitions[i1]; 686 assert(VDef->isReference()); 687 688 const unsigned *i2 = C2.lookup(P.first); 689 if (!i2 || (*i2 != i1)) 690 VDef->Ref = 0; // Mark this variable as undefined 691 } 692 } 693 694 // Traverse the CFG in topological order, so all predecessors of a block 695 // (excluding back-edges) are visited before the block itself. At 696 // each point in the code, we calculate a Context, which holds the set of 697 // variable definitions which are visible at that point in execution. 698 // Visible variables are mapped to their definitions using an array that 699 // contains all definitions. 700 // 701 // At join points in the CFG, the set is computed as the intersection of 702 // the incoming sets along each edge, E.g. 703 // 704 // { Context | VarDefinitions } 705 // int x = 0; { x -> x1 | x1 = 0 } 706 // int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 } 707 // if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... } 708 // else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... } 709 // ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... } 710 // 711 // This is essentially a simpler and more naive version of the standard SSA 712 // algorithm. Those definitions that remain in the intersection are from blocks 713 // that strictly dominate the current block. We do not bother to insert proper 714 // phi nodes, because they are not used in our analysis; instead, wherever 715 // a phi node would be required, we simply remove that definition from the 716 // context (E.g. x above). 717 // 718 // The initial traversal does not capture back-edges, so those need to be 719 // handled on a separate pass. Whenever the first pass encounters an 720 // incoming back edge, it duplicates the context, creating new definitions 721 // that refer back to the originals. (These correspond to places where SSA 722 // might have to insert a phi node.) On the second pass, these definitions are 723 // set to NULL if the variable has changed on the back-edge (i.e. a phi 724 // node was actually required.) E.g. 725 // 726 // { Context | VarDefinitions } 727 // int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 } 728 // while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; } 729 // x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... } 730 // ... { y -> y1 | x3 = 2, x2 = 1, ... } 731 void LocalVariableMap::traverseCFG(CFG *CFGraph, 732 const PostOrderCFGView *SortedGraph, 733 std::vector<CFGBlockInfo> &BlockInfo) { 734 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph); 735 736 for (const auto *CurrBlock : *SortedGraph) { 737 unsigned CurrBlockID = CurrBlock->getBlockID(); 738 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID]; 739 740 VisitedBlocks.insert(CurrBlock); 741 742 // Calculate the entry context for the current block 743 bool HasBackEdges = false; 744 bool CtxInit = true; 745 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), 746 PE = CurrBlock->pred_end(); PI != PE; ++PI) { 747 // if *PI -> CurrBlock is a back edge, so skip it 748 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) { 749 HasBackEdges = true; 750 continue; 751 } 752 753 unsigned PrevBlockID = (*PI)->getBlockID(); 754 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; 755 756 if (CtxInit) { 757 CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext; 758 CtxInit = false; 759 } 760 else { 761 CurrBlockInfo->EntryContext = 762 intersectContexts(CurrBlockInfo->EntryContext, 763 PrevBlockInfo->ExitContext); 764 } 765 } 766 767 // Duplicate the context if we have back-edges, so we can call 768 // intersectBackEdges later. 769 if (HasBackEdges) 770 CurrBlockInfo->EntryContext = 771 createReferenceContext(CurrBlockInfo->EntryContext); 772 773 // Create a starting context index for the current block 774 saveContext(nullptr, CurrBlockInfo->EntryContext); 775 CurrBlockInfo->EntryIndex = getContextIndex(); 776 777 // Visit all the statements in the basic block. 778 VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext); 779 for (const auto &BI : *CurrBlock) { 780 switch (BI.getKind()) { 781 case CFGElement::Statement: { 782 CFGStmt CS = BI.castAs<CFGStmt>(); 783 VMapBuilder.Visit(CS.getStmt()); 784 break; 785 } 786 default: 787 break; 788 } 789 } 790 CurrBlockInfo->ExitContext = VMapBuilder.Ctx; 791 792 // Mark variables on back edges as "unknown" if they've been changed. 793 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), 794 SE = CurrBlock->succ_end(); SI != SE; ++SI) { 795 // if CurrBlock -> *SI is *not* a back edge 796 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI)) 797 continue; 798 799 CFGBlock *FirstLoopBlock = *SI; 800 Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext; 801 Context LoopEnd = CurrBlockInfo->ExitContext; 802 intersectBackEdge(LoopBegin, LoopEnd); 803 } 804 } 805 806 // Put an extra entry at the end of the indexed context array 807 unsigned exitID = CFGraph->getExit().getBlockID(); 808 saveContext(nullptr, BlockInfo[exitID].ExitContext); 809 } 810 811 /// Find the appropriate source locations to use when producing diagnostics for 812 /// each block in the CFG. 813 static void findBlockLocations(CFG *CFGraph, 814 const PostOrderCFGView *SortedGraph, 815 std::vector<CFGBlockInfo> &BlockInfo) { 816 for (const auto *CurrBlock : *SortedGraph) { 817 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()]; 818 819 // Find the source location of the last statement in the block, if the 820 // block is not empty. 821 if (const Stmt *S = CurrBlock->getTerminatorStmt()) { 822 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc(); 823 } else { 824 for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(), 825 BE = CurrBlock->rend(); BI != BE; ++BI) { 826 // FIXME: Handle other CFGElement kinds. 827 if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) { 828 CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc(); 829 break; 830 } 831 } 832 } 833 834 if (CurrBlockInfo->ExitLoc.isValid()) { 835 // This block contains at least one statement. Find the source location 836 // of the first statement in the block. 837 for (const auto &BI : *CurrBlock) { 838 // FIXME: Handle other CFGElement kinds. 839 if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) { 840 CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc(); 841 break; 842 } 843 } 844 } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() && 845 CurrBlock != &CFGraph->getExit()) { 846 // The block is empty, and has a single predecessor. Use its exit 847 // location. 848 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = 849 BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc; 850 } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) { 851 // The block is empty, and has a single successor. Use its entry 852 // location. 853 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = 854 BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc; 855 } 856 } 857 } 858 859 namespace { 860 861 class LockableFactEntry : public FactEntry { 862 public: 863 LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc, 864 SourceKind Src = Acquired) 865 : FactEntry(Lockable, CE, LK, Loc, Src) {} 866 867 void 868 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 869 SourceLocation JoinLoc, LockErrorKind LEK, 870 ThreadSafetyHandler &Handler) const override { 871 if (!asserted() && !negative() && !isUniversal()) { 872 Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc, 873 LEK); 874 } 875 } 876 877 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry, 878 ThreadSafetyHandler &Handler) const override { 879 Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(), 880 entry.loc()); 881 } 882 883 void handleUnlock(FactSet &FSet, FactManager &FactMan, 884 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 885 bool FullyRemove, 886 ThreadSafetyHandler &Handler) const override { 887 FSet.removeLock(FactMan, Cp); 888 if (!Cp.negative()) { 889 FSet.addLock(FactMan, std::make_unique<LockableFactEntry>( 890 !Cp, LK_Exclusive, UnlockLoc)); 891 } 892 } 893 894 static bool classof(const FactEntry *A) { 895 return A->getFactEntryKind() == Lockable; 896 } 897 }; 898 899 class ScopedLockableFactEntry : public FactEntry { 900 private: 901 enum UnderlyingCapabilityKind { 902 UCK_Acquired, ///< Any kind of acquired capability. 903 UCK_ReleasedShared, ///< Shared capability that was released. 904 UCK_ReleasedExclusive, ///< Exclusive capability that was released. 905 }; 906 907 struct UnderlyingCapability { 908 CapabilityExpr Cap; 909 UnderlyingCapabilityKind Kind; 910 }; 911 912 SmallVector<UnderlyingCapability, 2> UnderlyingMutexes; 913 914 public: 915 ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc, 916 SourceKind Src) 917 : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src) {} 918 919 CapExprSet getUnderlyingMutexes() const { 920 CapExprSet UnderlyingMutexesSet; 921 for (const UnderlyingCapability &UnderlyingMutex : UnderlyingMutexes) 922 UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap); 923 return UnderlyingMutexesSet; 924 } 925 926 void addLock(const CapabilityExpr &M) { 927 UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_Acquired}); 928 } 929 930 void addExclusiveUnlock(const CapabilityExpr &M) { 931 UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedExclusive}); 932 } 933 934 void addSharedUnlock(const CapabilityExpr &M) { 935 UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedShared}); 936 } 937 938 void 939 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, 940 SourceLocation JoinLoc, LockErrorKind LEK, 941 ThreadSafetyHandler &Handler) const override { 942 if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction) 943 return; 944 945 for (const auto &UnderlyingMutex : UnderlyingMutexes) { 946 const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap); 947 if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) || 948 (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) { 949 // If this scoped lock manages another mutex, and if the underlying 950 // mutex is still/not held, then warn about the underlying mutex. 951 Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(), 952 UnderlyingMutex.Cap.toString(), loc(), 953 JoinLoc, LEK); 954 } 955 } 956 } 957 958 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry, 959 ThreadSafetyHandler &Handler) const override { 960 for (const auto &UnderlyingMutex : UnderlyingMutexes) { 961 if (UnderlyingMutex.Kind == UCK_Acquired) 962 lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(), 963 &Handler); 964 else 965 unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler); 966 } 967 } 968 969 void handleUnlock(FactSet &FSet, FactManager &FactMan, 970 const CapabilityExpr &Cp, SourceLocation UnlockLoc, 971 bool FullyRemove, 972 ThreadSafetyHandler &Handler) const override { 973 assert(!Cp.negative() && "Managing object cannot be negative."); 974 for (const auto &UnderlyingMutex : UnderlyingMutexes) { 975 // Remove/lock the underlying mutex if it exists/is still unlocked; warn 976 // on double unlocking/locking if we're not destroying the scoped object. 977 ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler; 978 if (UnderlyingMutex.Kind == UCK_Acquired) { 979 unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler); 980 } else { 981 LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared 982 ? LK_Shared 983 : LK_Exclusive; 984 lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler); 985 } 986 } 987 if (FullyRemove) 988 FSet.removeLock(FactMan, Cp); 989 } 990 991 static bool classof(const FactEntry *A) { 992 return A->getFactEntryKind() == ScopedLockable; 993 } 994 995 private: 996 void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp, 997 LockKind kind, SourceLocation loc, 998 ThreadSafetyHandler *Handler) const { 999 if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) { 1000 if (Handler) 1001 Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact->loc(), 1002 loc); 1003 } else { 1004 FSet.removeLock(FactMan, !Cp); 1005 FSet.addLock(FactMan, 1006 std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed)); 1007 } 1008 } 1009 1010 void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp, 1011 SourceLocation loc, ThreadSafetyHandler *Handler) const { 1012 if (FSet.findLock(FactMan, Cp)) { 1013 FSet.removeLock(FactMan, Cp); 1014 FSet.addLock(FactMan, std::make_unique<LockableFactEntry>( 1015 !Cp, LK_Exclusive, loc)); 1016 } else if (Handler) { 1017 SourceLocation PrevLoc; 1018 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp)) 1019 PrevLoc = Neg->loc(); 1020 Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc); 1021 } 1022 } 1023 }; 1024 1025 /// Class which implements the core thread safety analysis routines. 1026 class ThreadSafetyAnalyzer { 1027 friend class BuildLockset; 1028 friend class threadSafety::BeforeSet; 1029 1030 llvm::BumpPtrAllocator Bpa; 1031 threadSafety::til::MemRegionRef Arena; 1032 threadSafety::SExprBuilder SxBuilder; 1033 1034 ThreadSafetyHandler &Handler; 1035 const FunctionDecl *CurrentFunction; 1036 LocalVariableMap LocalVarMap; 1037 // Maps constructed objects to `this` placeholder prior to initialization. 1038 llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects; 1039 FactManager FactMan; 1040 std::vector<CFGBlockInfo> BlockInfo; 1041 1042 BeforeSet *GlobalBeforeSet; 1043 1044 public: 1045 ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset) 1046 : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {} 1047 1048 bool inCurrentScope(const CapabilityExpr &CapE); 1049 1050 void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry, 1051 bool ReqAttr = false); 1052 void removeLock(FactSet &FSet, const CapabilityExpr &CapE, 1053 SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind); 1054 1055 template <typename AttrType> 1056 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp, 1057 const NamedDecl *D, til::SExpr *Self = nullptr); 1058 1059 template <class AttrType> 1060 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp, 1061 const NamedDecl *D, 1062 const CFGBlock *PredBlock, const CFGBlock *CurrBlock, 1063 Expr *BrE, bool Neg); 1064 1065 const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C, 1066 bool &Negate); 1067 1068 void getEdgeLockset(FactSet &Result, const FactSet &ExitSet, 1069 const CFGBlock* PredBlock, 1070 const CFGBlock *CurrBlock); 1071 1072 bool join(const FactEntry &a, const FactEntry &b, bool CanModify); 1073 1074 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet, 1075 SourceLocation JoinLoc, LockErrorKind EntryLEK, 1076 LockErrorKind ExitLEK); 1077 1078 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet, 1079 SourceLocation JoinLoc, LockErrorKind LEK) { 1080 intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK); 1081 } 1082 1083 void runAnalysis(AnalysisDeclContext &AC); 1084 1085 void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D, 1086 const Expr *Exp, AccessKind AK, Expr *MutexExp, 1087 ProtectedOperationKind POK, til::LiteralPtr *Self, 1088 SourceLocation Loc); 1089 void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp, 1090 Expr *MutexExp, til::LiteralPtr *Self, 1091 SourceLocation Loc); 1092 1093 void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK, 1094 ProtectedOperationKind POK); 1095 void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK, 1096 ProtectedOperationKind POK); 1097 }; 1098 1099 } // namespace 1100 1101 /// Process acquired_before and acquired_after attributes on Vd. 1102 BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd, 1103 ThreadSafetyAnalyzer& Analyzer) { 1104 // Create a new entry for Vd. 1105 BeforeInfo *Info = nullptr; 1106 { 1107 // Keep InfoPtr in its own scope in case BMap is modified later and the 1108 // reference becomes invalid. 1109 std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd]; 1110 if (!InfoPtr) 1111 InfoPtr.reset(new BeforeInfo()); 1112 Info = InfoPtr.get(); 1113 } 1114 1115 for (const auto *At : Vd->attrs()) { 1116 switch (At->getKind()) { 1117 case attr::AcquiredBefore: { 1118 const auto *A = cast<AcquiredBeforeAttr>(At); 1119 1120 // Read exprs from the attribute, and add them to BeforeVect. 1121 for (const auto *Arg : A->args()) { 1122 CapabilityExpr Cp = 1123 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr); 1124 if (const ValueDecl *Cpvd = Cp.valueDecl()) { 1125 Info->Vect.push_back(Cpvd); 1126 const auto It = BMap.find(Cpvd); 1127 if (It == BMap.end()) 1128 insertAttrExprs(Cpvd, Analyzer); 1129 } 1130 } 1131 break; 1132 } 1133 case attr::AcquiredAfter: { 1134 const auto *A = cast<AcquiredAfterAttr>(At); 1135 1136 // Read exprs from the attribute, and add them to BeforeVect. 1137 for (const auto *Arg : A->args()) { 1138 CapabilityExpr Cp = 1139 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr); 1140 if (const ValueDecl *ArgVd = Cp.valueDecl()) { 1141 // Get entry for mutex listed in attribute 1142 BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer); 1143 ArgInfo->Vect.push_back(Vd); 1144 } 1145 } 1146 break; 1147 } 1148 default: 1149 break; 1150 } 1151 } 1152 1153 return Info; 1154 } 1155 1156 BeforeSet::BeforeInfo * 1157 BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd, 1158 ThreadSafetyAnalyzer &Analyzer) { 1159 auto It = BMap.find(Vd); 1160 BeforeInfo *Info = nullptr; 1161 if (It == BMap.end()) 1162 Info = insertAttrExprs(Vd, Analyzer); 1163 else 1164 Info = It->second.get(); 1165 assert(Info && "BMap contained nullptr?"); 1166 return Info; 1167 } 1168 1169 /// Return true if any mutexes in FSet are in the acquired_before set of Vd. 1170 void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd, 1171 const FactSet& FSet, 1172 ThreadSafetyAnalyzer& Analyzer, 1173 SourceLocation Loc, StringRef CapKind) { 1174 SmallVector<BeforeInfo*, 8> InfoVect; 1175 1176 // Do a depth-first traversal of Vd. 1177 // Return true if there are cycles. 1178 std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) { 1179 if (!Vd) 1180 return false; 1181 1182 BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer); 1183 1184 if (Info->Visited == 1) 1185 return true; 1186 1187 if (Info->Visited == 2) 1188 return false; 1189 1190 if (Info->Vect.empty()) 1191 return false; 1192 1193 InfoVect.push_back(Info); 1194 Info->Visited = 1; 1195 for (const auto *Vdb : Info->Vect) { 1196 // Exclude mutexes in our immediate before set. 1197 if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) { 1198 StringRef L1 = StartVd->getName(); 1199 StringRef L2 = Vdb->getName(); 1200 Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc); 1201 } 1202 // Transitively search other before sets, and warn on cycles. 1203 if (traverse(Vdb)) { 1204 if (CycMap.try_emplace(Vd, true).second) { 1205 StringRef L1 = Vd->getName(); 1206 Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation()); 1207 } 1208 } 1209 } 1210 Info->Visited = 2; 1211 return false; 1212 }; 1213 1214 traverse(StartVd); 1215 1216 for (auto *Info : InfoVect) 1217 Info->Visited = 0; 1218 } 1219 1220 /// Gets the value decl pointer from DeclRefExprs or MemberExprs. 1221 static const ValueDecl *getValueDecl(const Expr *Exp) { 1222 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp)) 1223 return getValueDecl(CE->getSubExpr()); 1224 1225 if (const auto *DR = dyn_cast<DeclRefExpr>(Exp)) 1226 return DR->getDecl(); 1227 1228 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) 1229 return ME->getMemberDecl(); 1230 1231 return nullptr; 1232 } 1233 1234 namespace { 1235 1236 template <typename Ty> 1237 class has_arg_iterator_range { 1238 using yes = char[1]; 1239 using no = char[2]; 1240 1241 template <typename Inner> 1242 static yes& test(Inner *I, decltype(I->args()) * = nullptr); 1243 1244 template <typename> 1245 static no& test(...); 1246 1247 public: 1248 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes); 1249 }; 1250 1251 } // namespace 1252 1253 bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) { 1254 const threadSafety::til::SExpr *SExp = CapE.sexpr(); 1255 assert(SExp && "Null expressions should be ignored"); 1256 1257 if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) { 1258 const ValueDecl *VD = LP->clangDecl(); 1259 // Variables defined in a function are always inaccessible. 1260 if (!VD || !VD->isDefinedOutsideFunctionOrMethod()) 1261 return false; 1262 // For now we consider static class members to be inaccessible. 1263 if (isa<CXXRecordDecl>(VD->getDeclContext())) 1264 return false; 1265 // Global variables are always in scope. 1266 return true; 1267 } 1268 1269 // Members are in scope from methods of the same class. 1270 if (const auto *P = dyn_cast<til::Project>(SExp)) { 1271 if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction)) 1272 return false; 1273 const ValueDecl *VD = P->clangDecl(); 1274 return VD->getDeclContext() == CurrentFunction->getDeclContext(); 1275 } 1276 1277 return false; 1278 } 1279 1280 /// Add a new lock to the lockset, warning if the lock is already there. 1281 /// \param ReqAttr -- true if this is part of an initial Requires attribute. 1282 void ThreadSafetyAnalyzer::addLock(FactSet &FSet, 1283 std::unique_ptr<FactEntry> Entry, 1284 bool ReqAttr) { 1285 if (Entry->shouldIgnore()) 1286 return; 1287 1288 if (!ReqAttr && !Entry->negative()) { 1289 // look for the negative capability, and remove it from the fact set. 1290 CapabilityExpr NegC = !*Entry; 1291 const FactEntry *Nen = FSet.findLock(FactMan, NegC); 1292 if (Nen) { 1293 FSet.removeLock(FactMan, NegC); 1294 } 1295 else { 1296 if (inCurrentScope(*Entry) && !Entry->asserted()) 1297 Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(), 1298 NegC.toString(), Entry->loc()); 1299 } 1300 } 1301 1302 // Check before/after constraints 1303 if (Handler.issueBetaWarnings() && 1304 !Entry->asserted() && !Entry->declared()) { 1305 GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this, 1306 Entry->loc(), Entry->getKind()); 1307 } 1308 1309 // FIXME: Don't always warn when we have support for reentrant locks. 1310 if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) { 1311 if (!Entry->asserted()) 1312 Cp->handleLock(FSet, FactMan, *Entry, Handler); 1313 } else { 1314 FSet.addLock(FactMan, std::move(Entry)); 1315 } 1316 } 1317 1318 /// Remove a lock from the lockset, warning if the lock is not there. 1319 /// \param UnlockLoc The source location of the unlock (only used in error msg) 1320 void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp, 1321 SourceLocation UnlockLoc, 1322 bool FullyRemove, LockKind ReceivedKind) { 1323 if (Cp.shouldIgnore()) 1324 return; 1325 1326 const FactEntry *LDat = FSet.findLock(FactMan, Cp); 1327 if (!LDat) { 1328 SourceLocation PrevLoc; 1329 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp)) 1330 PrevLoc = Neg->loc(); 1331 Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc, 1332 PrevLoc); 1333 return; 1334 } 1335 1336 // Generic lock removal doesn't care about lock kind mismatches, but 1337 // otherwise diagnose when the lock kinds are mismatched. 1338 if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) { 1339 Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(), 1340 ReceivedKind, LDat->loc(), UnlockLoc); 1341 } 1342 1343 LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler); 1344 } 1345 1346 /// Extract the list of mutexIDs from the attribute on an expression, 1347 /// and push them onto Mtxs, discarding any duplicates. 1348 template <typename AttrType> 1349 void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, 1350 const Expr *Exp, const NamedDecl *D, 1351 til::SExpr *Self) { 1352 if (Attr->args_size() == 0) { 1353 // The mutex held is the "this" object. 1354 CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self); 1355 if (Cp.isInvalid()) { 1356 warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind()); 1357 return; 1358 } 1359 //else 1360 if (!Cp.shouldIgnore()) 1361 Mtxs.push_back_nodup(Cp); 1362 return; 1363 } 1364 1365 for (const auto *Arg : Attr->args()) { 1366 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self); 1367 if (Cp.isInvalid()) { 1368 warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind()); 1369 continue; 1370 } 1371 //else 1372 if (!Cp.shouldIgnore()) 1373 Mtxs.push_back_nodup(Cp); 1374 } 1375 } 1376 1377 /// Extract the list of mutexIDs from a trylock attribute. If the 1378 /// trylock applies to the given edge, then push them onto Mtxs, discarding 1379 /// any duplicates. 1380 template <class AttrType> 1381 void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, 1382 const Expr *Exp, const NamedDecl *D, 1383 const CFGBlock *PredBlock, 1384 const CFGBlock *CurrBlock, 1385 Expr *BrE, bool Neg) { 1386 // Find out which branch has the lock 1387 bool branch = false; 1388 if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) 1389 branch = BLE->getValue(); 1390 else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) 1391 branch = ILE->getValue().getBoolValue(); 1392 1393 int branchnum = branch ? 0 : 1; 1394 if (Neg) 1395 branchnum = !branchnum; 1396 1397 // If we've taken the trylock branch, then add the lock 1398 int i = 0; 1399 for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(), 1400 SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) { 1401 if (*SI == CurrBlock && i == branchnum) 1402 getMutexIDs(Mtxs, Attr, Exp, D); 1403 } 1404 } 1405 1406 static bool getStaticBooleanValue(Expr *E, bool &TCond) { 1407 if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) { 1408 TCond = false; 1409 return true; 1410 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) { 1411 TCond = BLE->getValue(); 1412 return true; 1413 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) { 1414 TCond = ILE->getValue().getBoolValue(); 1415 return true; 1416 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E)) 1417 return getStaticBooleanValue(CE->getSubExpr(), TCond); 1418 return false; 1419 } 1420 1421 // If Cond can be traced back to a function call, return the call expression. 1422 // The negate variable should be called with false, and will be set to true 1423 // if the function call is negated, e.g. if (!mu.tryLock(...)) 1424 const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond, 1425 LocalVarContext C, 1426 bool &Negate) { 1427 if (!Cond) 1428 return nullptr; 1429 1430 if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) { 1431 if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect) 1432 return getTrylockCallExpr(CallExp->getArg(0), C, Negate); 1433 return CallExp; 1434 } 1435 else if (const auto *PE = dyn_cast<ParenExpr>(Cond)) 1436 return getTrylockCallExpr(PE->getSubExpr(), C, Negate); 1437 else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond)) 1438 return getTrylockCallExpr(CE->getSubExpr(), C, Negate); 1439 else if (const auto *FE = dyn_cast<FullExpr>(Cond)) 1440 return getTrylockCallExpr(FE->getSubExpr(), C, Negate); 1441 else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) { 1442 const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C); 1443 return getTrylockCallExpr(E, C, Negate); 1444 } 1445 else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) { 1446 if (UOP->getOpcode() == UO_LNot) { 1447 Negate = !Negate; 1448 return getTrylockCallExpr(UOP->getSubExpr(), C, Negate); 1449 } 1450 return nullptr; 1451 } 1452 else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) { 1453 if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) { 1454 if (BOP->getOpcode() == BO_NE) 1455 Negate = !Negate; 1456 1457 bool TCond = false; 1458 if (getStaticBooleanValue(BOP->getRHS(), TCond)) { 1459 if (!TCond) Negate = !Negate; 1460 return getTrylockCallExpr(BOP->getLHS(), C, Negate); 1461 } 1462 TCond = false; 1463 if (getStaticBooleanValue(BOP->getLHS(), TCond)) { 1464 if (!TCond) Negate = !Negate; 1465 return getTrylockCallExpr(BOP->getRHS(), C, Negate); 1466 } 1467 return nullptr; 1468 } 1469 if (BOP->getOpcode() == BO_LAnd) { 1470 // LHS must have been evaluated in a different block. 1471 return getTrylockCallExpr(BOP->getRHS(), C, Negate); 1472 } 1473 if (BOP->getOpcode() == BO_LOr) 1474 return getTrylockCallExpr(BOP->getRHS(), C, Negate); 1475 return nullptr; 1476 } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) { 1477 bool TCond, FCond; 1478 if (getStaticBooleanValue(COP->getTrueExpr(), TCond) && 1479 getStaticBooleanValue(COP->getFalseExpr(), FCond)) { 1480 if (TCond && !FCond) 1481 return getTrylockCallExpr(COP->getCond(), C, Negate); 1482 if (!TCond && FCond) { 1483 Negate = !Negate; 1484 return getTrylockCallExpr(COP->getCond(), C, Negate); 1485 } 1486 } 1487 } 1488 return nullptr; 1489 } 1490 1491 /// Find the lockset that holds on the edge between PredBlock 1492 /// and CurrBlock. The edge set is the exit set of PredBlock (passed 1493 /// as the ExitSet parameter) plus any trylocks, which are conditionally held. 1494 void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result, 1495 const FactSet &ExitSet, 1496 const CFGBlock *PredBlock, 1497 const CFGBlock *CurrBlock) { 1498 Result = ExitSet; 1499 1500 const Stmt *Cond = PredBlock->getTerminatorCondition(); 1501 // We don't acquire try-locks on ?: branches, only when its result is used. 1502 if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt())) 1503 return; 1504 1505 bool Negate = false; 1506 const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()]; 1507 const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext; 1508 1509 const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate); 1510 if (!Exp) 1511 return; 1512 1513 auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); 1514 if(!FunDecl || !FunDecl->hasAttrs()) 1515 return; 1516 1517 CapExprSet ExclusiveLocksToAdd; 1518 CapExprSet SharedLocksToAdd; 1519 1520 // If the condition is a call to a Trylock function, then grab the attributes 1521 for (const auto *Attr : FunDecl->attrs()) { 1522 switch (Attr->getKind()) { 1523 case attr::TryAcquireCapability: { 1524 auto *A = cast<TryAcquireCapabilityAttr>(Attr); 1525 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 1526 Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(), 1527 Negate); 1528 break; 1529 }; 1530 case attr::ExclusiveTrylockFunction: { 1531 const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr); 1532 getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock, 1533 A->getSuccessValue(), Negate); 1534 break; 1535 } 1536 case attr::SharedTrylockFunction: { 1537 const auto *A = cast<SharedTrylockFunctionAttr>(Attr); 1538 getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock, 1539 A->getSuccessValue(), Negate); 1540 break; 1541 } 1542 default: 1543 break; 1544 } 1545 } 1546 1547 // Add and remove locks. 1548 SourceLocation Loc = Exp->getExprLoc(); 1549 for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd) 1550 addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd, 1551 LK_Exclusive, Loc)); 1552 for (const auto &SharedLockToAdd : SharedLocksToAdd) 1553 addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd, 1554 LK_Shared, Loc)); 1555 } 1556 1557 namespace { 1558 1559 /// We use this class to visit different types of expressions in 1560 /// CFGBlocks, and build up the lockset. 1561 /// An expression may cause us to add or remove locks from the lockset, or else 1562 /// output error messages related to missing locks. 1563 /// FIXME: In future, we may be able to not inherit from a visitor. 1564 class BuildLockset : public ConstStmtVisitor<BuildLockset> { 1565 friend class ThreadSafetyAnalyzer; 1566 1567 ThreadSafetyAnalyzer *Analyzer; 1568 FactSet FSet; 1569 // The fact set for the function on exit. 1570 const FactSet &FunctionExitFSet; 1571 LocalVariableMap::Context LVarCtx; 1572 unsigned CtxIndex; 1573 1574 // helper functions 1575 1576 void checkAccess(const Expr *Exp, AccessKind AK, 1577 ProtectedOperationKind POK = POK_VarAccess) { 1578 Analyzer->checkAccess(FSet, Exp, AK, POK); 1579 } 1580 void checkPtAccess(const Expr *Exp, AccessKind AK, 1581 ProtectedOperationKind POK = POK_VarAccess) { 1582 Analyzer->checkPtAccess(FSet, Exp, AK, POK); 1583 } 1584 1585 void handleCall(const Expr *Exp, const NamedDecl *D, 1586 til::LiteralPtr *Self = nullptr, 1587 SourceLocation Loc = SourceLocation()); 1588 void examineArguments(const FunctionDecl *FD, 1589 CallExpr::const_arg_iterator ArgBegin, 1590 CallExpr::const_arg_iterator ArgEnd, 1591 bool SkipFirstParam = false); 1592 1593 public: 1594 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info, 1595 const FactSet &FunctionExitFSet) 1596 : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet), 1597 FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext), 1598 CtxIndex(Info.EntryIndex) {} 1599 1600 void VisitUnaryOperator(const UnaryOperator *UO); 1601 void VisitBinaryOperator(const BinaryOperator *BO); 1602 void VisitCastExpr(const CastExpr *CE); 1603 void VisitCallExpr(const CallExpr *Exp); 1604 void VisitCXXConstructExpr(const CXXConstructExpr *Exp); 1605 void VisitDeclStmt(const DeclStmt *S); 1606 void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp); 1607 void VisitReturnStmt(const ReturnStmt *S); 1608 }; 1609 1610 } // namespace 1611 1612 /// Warn if the LSet does not contain a lock sufficient to protect access 1613 /// of at least the passed in AccessKind. 1614 void ThreadSafetyAnalyzer::warnIfMutexNotHeld( 1615 const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK, 1616 Expr *MutexExp, ProtectedOperationKind POK, til::LiteralPtr *Self, 1617 SourceLocation Loc) { 1618 LockKind LK = getLockKindFromAccessKind(AK); 1619 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self); 1620 if (Cp.isInvalid()) { 1621 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind()); 1622 return; 1623 } else if (Cp.shouldIgnore()) { 1624 return; 1625 } 1626 1627 if (Cp.negative()) { 1628 // Negative capabilities act like locks excluded 1629 const FactEntry *LDat = FSet.findLock(FactMan, !Cp); 1630 if (LDat) { 1631 Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(), 1632 (!Cp).toString(), Loc); 1633 return; 1634 } 1635 1636 // If this does not refer to a negative capability in the same class, 1637 // then stop here. 1638 if (!inCurrentScope(Cp)) 1639 return; 1640 1641 // Otherwise the negative requirement must be propagated to the caller. 1642 LDat = FSet.findLock(FactMan, Cp); 1643 if (!LDat) { 1644 Handler.handleNegativeNotHeld(D, Cp.toString(), Loc); 1645 } 1646 return; 1647 } 1648 1649 const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp); 1650 bool NoError = true; 1651 if (!LDat) { 1652 // No exact match found. Look for a partial match. 1653 LDat = FSet.findPartialMatch(FactMan, Cp); 1654 if (LDat) { 1655 // Warn that there's no precise match. 1656 std::string PartMatchStr = LDat->toString(); 1657 StringRef PartMatchName(PartMatchStr); 1658 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc, 1659 &PartMatchName); 1660 } else { 1661 // Warn that there's no match at all. 1662 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc); 1663 } 1664 NoError = false; 1665 } 1666 // Make sure the mutex we found is the right kind. 1667 if (NoError && LDat && !LDat->isAtLeast(LK)) { 1668 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc); 1669 } 1670 } 1671 1672 /// Warn if the LSet contains the given lock. 1673 void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet, 1674 const NamedDecl *D, const Expr *Exp, 1675 Expr *MutexExp, 1676 til::LiteralPtr *Self, 1677 SourceLocation Loc) { 1678 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self); 1679 if (Cp.isInvalid()) { 1680 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind()); 1681 return; 1682 } else if (Cp.shouldIgnore()) { 1683 return; 1684 } 1685 1686 const FactEntry *LDat = FSet.findLock(FactMan, Cp); 1687 if (LDat) { 1688 Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(), 1689 Cp.toString(), Loc); 1690 } 1691 } 1692 1693 /// Checks guarded_by and pt_guarded_by attributes. 1694 /// Whenever we identify an access (read or write) to a DeclRefExpr that is 1695 /// marked with guarded_by, we must ensure the appropriate mutexes are held. 1696 /// Similarly, we check if the access is to an expression that dereferences 1697 /// a pointer marked with pt_guarded_by. 1698 void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp, 1699 AccessKind AK, 1700 ProtectedOperationKind POK) { 1701 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts(); 1702 1703 SourceLocation Loc = Exp->getExprLoc(); 1704 1705 // Local variables of reference type cannot be re-assigned; 1706 // map them to their initializer. 1707 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) { 1708 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl()); 1709 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) { 1710 if (const auto *E = VD->getInit()) { 1711 // Guard against self-initialization. e.g., int &i = i; 1712 if (E == Exp) 1713 break; 1714 Exp = E; 1715 continue; 1716 } 1717 } 1718 break; 1719 } 1720 1721 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) { 1722 // For dereferences 1723 if (UO->getOpcode() == UO_Deref) 1724 checkPtAccess(FSet, UO->getSubExpr(), AK, POK); 1725 return; 1726 } 1727 1728 if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) { 1729 switch (BO->getOpcode()) { 1730 case BO_PtrMemD: // .* 1731 return checkAccess(FSet, BO->getLHS(), AK, POK); 1732 case BO_PtrMemI: // ->* 1733 return checkPtAccess(FSet, BO->getLHS(), AK, POK); 1734 default: 1735 return; 1736 } 1737 } 1738 1739 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) { 1740 checkPtAccess(FSet, AE->getLHS(), AK, POK); 1741 return; 1742 } 1743 1744 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) { 1745 if (ME->isArrow()) 1746 checkPtAccess(FSet, ME->getBase(), AK, POK); 1747 else 1748 checkAccess(FSet, ME->getBase(), AK, POK); 1749 } 1750 1751 const ValueDecl *D = getValueDecl(Exp); 1752 if (!D || !D->hasAttrs()) 1753 return; 1754 1755 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) { 1756 Handler.handleNoMutexHeld(D, POK, AK, Loc); 1757 } 1758 1759 for (const auto *I : D->specific_attrs<GuardedByAttr>()) 1760 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc); 1761 } 1762 1763 /// Checks pt_guarded_by and pt_guarded_var attributes. 1764 /// POK is the same operationKind that was passed to checkAccess. 1765 void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp, 1766 AccessKind AK, 1767 ProtectedOperationKind POK) { 1768 while (true) { 1769 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) { 1770 Exp = PE->getSubExpr(); 1771 continue; 1772 } 1773 if (const auto *CE = dyn_cast<CastExpr>(Exp)) { 1774 if (CE->getCastKind() == CK_ArrayToPointerDecay) { 1775 // If it's an actual array, and not a pointer, then it's elements 1776 // are protected by GUARDED_BY, not PT_GUARDED_BY; 1777 checkAccess(FSet, CE->getSubExpr(), AK, POK); 1778 return; 1779 } 1780 Exp = CE->getSubExpr(); 1781 continue; 1782 } 1783 break; 1784 } 1785 1786 // Pass by reference warnings are under a different flag. 1787 ProtectedOperationKind PtPOK = POK_VarDereference; 1788 if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef; 1789 if (POK == POK_ReturnByRef) 1790 PtPOK = POK_PtReturnByRef; 1791 1792 const ValueDecl *D = getValueDecl(Exp); 1793 if (!D || !D->hasAttrs()) 1794 return; 1795 1796 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan)) 1797 Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc()); 1798 1799 for (auto const *I : D->specific_attrs<PtGuardedByAttr>()) 1800 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr, 1801 Exp->getExprLoc()); 1802 } 1803 1804 /// Process a function call, method call, constructor call, 1805 /// or destructor call. This involves looking at the attributes on the 1806 /// corresponding function/method/constructor/destructor, issuing warnings, 1807 /// and updating the locksets accordingly. 1808 /// 1809 /// FIXME: For classes annotated with one of the guarded annotations, we need 1810 /// to treat const method calls as reads and non-const method calls as writes, 1811 /// and check that the appropriate locks are held. Non-const method calls with 1812 /// the same signature as const method calls can be also treated as reads. 1813 /// 1814 /// \param Exp The call expression. 1815 /// \param D The callee declaration. 1816 /// \param Self If \p Exp = nullptr, the implicit this argument or the argument 1817 /// of an implicitly called cleanup function. 1818 /// \param Loc If \p Exp = nullptr, the location. 1819 void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D, 1820 til::LiteralPtr *Self, SourceLocation Loc) { 1821 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd; 1822 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove; 1823 CapExprSet ScopedReqsAndExcludes; 1824 1825 // Figure out if we're constructing an object of scoped lockable class 1826 CapabilityExpr Scp; 1827 if (Exp) { 1828 assert(!Self); 1829 const auto *TagT = Exp->getType()->getAs<TagType>(); 1830 if (D->hasAttrs() && TagT && Exp->isPRValue()) { 1831 std::pair<til::LiteralPtr *, StringRef> Placeholder = 1832 Analyzer->SxBuilder.createThisPlaceholder(Exp); 1833 [[maybe_unused]] auto inserted = 1834 Analyzer->ConstructedObjects.insert({Exp, Placeholder.first}); 1835 assert(inserted.second && "Are we visiting the same expression again?"); 1836 if (isa<CXXConstructExpr>(Exp)) 1837 Self = Placeholder.first; 1838 if (TagT->getDecl()->hasAttr<ScopedLockableAttr>()) 1839 Scp = CapabilityExpr(Placeholder.first, Placeholder.second, false); 1840 } 1841 1842 assert(Loc.isInvalid()); 1843 Loc = Exp->getExprLoc(); 1844 } 1845 1846 for(const Attr *At : D->attrs()) { 1847 switch (At->getKind()) { 1848 // When we encounter a lock function, we need to add the lock to our 1849 // lockset. 1850 case attr::AcquireCapability: { 1851 const auto *A = cast<AcquireCapabilityAttr>(At); 1852 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd 1853 : ExclusiveLocksToAdd, 1854 A, Exp, D, Self); 1855 break; 1856 } 1857 1858 // An assert will add a lock to the lockset, but will not generate 1859 // a warning if it is already there, and will not generate a warning 1860 // if it is not removed. 1861 case attr::AssertExclusiveLock: { 1862 const auto *A = cast<AssertExclusiveLockAttr>(At); 1863 1864 CapExprSet AssertLocks; 1865 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self); 1866 for (const auto &AssertLock : AssertLocks) 1867 Analyzer->addLock( 1868 FSet, std::make_unique<LockableFactEntry>( 1869 AssertLock, LK_Exclusive, Loc, FactEntry::Asserted)); 1870 break; 1871 } 1872 case attr::AssertSharedLock: { 1873 const auto *A = cast<AssertSharedLockAttr>(At); 1874 1875 CapExprSet AssertLocks; 1876 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self); 1877 for (const auto &AssertLock : AssertLocks) 1878 Analyzer->addLock( 1879 FSet, std::make_unique<LockableFactEntry>( 1880 AssertLock, LK_Shared, Loc, FactEntry::Asserted)); 1881 break; 1882 } 1883 1884 case attr::AssertCapability: { 1885 const auto *A = cast<AssertCapabilityAttr>(At); 1886 CapExprSet AssertLocks; 1887 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self); 1888 for (const auto &AssertLock : AssertLocks) 1889 Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>( 1890 AssertLock, 1891 A->isShared() ? LK_Shared : LK_Exclusive, 1892 Loc, FactEntry::Asserted)); 1893 break; 1894 } 1895 1896 // When we encounter an unlock function, we need to remove unlocked 1897 // mutexes from the lockset, and flag a warning if they are not there. 1898 case attr::ReleaseCapability: { 1899 const auto *A = cast<ReleaseCapabilityAttr>(At); 1900 if (A->isGeneric()) 1901 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self); 1902 else if (A->isShared()) 1903 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self); 1904 else 1905 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self); 1906 break; 1907 } 1908 1909 case attr::RequiresCapability: { 1910 const auto *A = cast<RequiresCapabilityAttr>(At); 1911 for (auto *Arg : A->args()) { 1912 Analyzer->warnIfMutexNotHeld(FSet, D, Exp, 1913 A->isShared() ? AK_Read : AK_Written, 1914 Arg, POK_FunctionCall, Self, Loc); 1915 // use for adopting a lock 1916 if (!Scp.shouldIgnore()) 1917 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self); 1918 } 1919 break; 1920 } 1921 1922 case attr::LocksExcluded: { 1923 const auto *A = cast<LocksExcludedAttr>(At); 1924 for (auto *Arg : A->args()) { 1925 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc); 1926 // use for deferring a lock 1927 if (!Scp.shouldIgnore()) 1928 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self); 1929 } 1930 break; 1931 } 1932 1933 // Ignore attributes unrelated to thread-safety 1934 default: 1935 break; 1936 } 1937 } 1938 1939 std::optional<CallExpr::const_arg_range> Args; 1940 if (Exp) { 1941 if (const auto *CE = dyn_cast<CallExpr>(Exp)) 1942 Args = CE->arguments(); 1943 else if (const auto *CE = dyn_cast<CXXConstructExpr>(Exp)) 1944 Args = CE->arguments(); 1945 else 1946 llvm_unreachable("Unknown call kind"); 1947 } 1948 const auto *CalledFunction = dyn_cast<FunctionDecl>(D); 1949 if (CalledFunction && Args.has_value()) { 1950 for (auto [Param, Arg] : zip(CalledFunction->parameters(), *Args)) { 1951 CapExprSet DeclaredLocks; 1952 for (const Attr *At : Param->attrs()) { 1953 switch (At->getKind()) { 1954 case attr::AcquireCapability: { 1955 const auto *A = cast<AcquireCapabilityAttr>(At); 1956 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd 1957 : ExclusiveLocksToAdd, 1958 A, Exp, D, Self); 1959 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self); 1960 break; 1961 } 1962 1963 case attr::ReleaseCapability: { 1964 const auto *A = cast<ReleaseCapabilityAttr>(At); 1965 if (A->isGeneric()) 1966 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self); 1967 else if (A->isShared()) 1968 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self); 1969 else 1970 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self); 1971 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self); 1972 break; 1973 } 1974 1975 case attr::RequiresCapability: { 1976 const auto *A = cast<RequiresCapabilityAttr>(At); 1977 for (auto *Arg : A->args()) 1978 Analyzer->warnIfMutexNotHeld(FSet, D, Exp, 1979 A->isShared() ? AK_Read : AK_Written, 1980 Arg, POK_FunctionCall, Self, Loc); 1981 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self); 1982 break; 1983 } 1984 1985 case attr::LocksExcluded: { 1986 const auto *A = cast<LocksExcludedAttr>(At); 1987 for (auto *Arg : A->args()) 1988 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc); 1989 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self); 1990 break; 1991 } 1992 1993 default: 1994 break; 1995 } 1996 } 1997 if (DeclaredLocks.empty()) 1998 continue; 1999 CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr), 2000 StringRef("mutex"), false); 2001 if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts()); 2002 Cp.isInvalid() && CBTE) { 2003 if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr()); 2004 Object != Analyzer->ConstructedObjects.end()) 2005 Cp = CapabilityExpr(Object->second, StringRef("mutex"), false); 2006 } 2007 const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp); 2008 if (!Fact) { 2009 Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK_FunctionCall, 2010 Cp.toString(), LK_Exclusive, 2011 Exp->getExprLoc()); 2012 continue; 2013 } 2014 const auto *Scope = cast<ScopedLockableFactEntry>(Fact); 2015 for (const auto &[a, b] : 2016 zip_longest(DeclaredLocks, Scope->getUnderlyingMutexes())) { 2017 if (!a.has_value()) { 2018 Analyzer->Handler.handleExpectFewerUnderlyingMutexes( 2019 Exp->getExprLoc(), D->getLocation(), Scope->toString(), 2020 b.value().getKind(), b.value().toString()); 2021 } else if (!b.has_value()) { 2022 Analyzer->Handler.handleExpectMoreUnderlyingMutexes( 2023 Exp->getExprLoc(), D->getLocation(), Scope->toString(), 2024 a.value().getKind(), a.value().toString()); 2025 } else if (!a.value().equals(b.value())) { 2026 Analyzer->Handler.handleUnmatchedUnderlyingMutexes( 2027 Exp->getExprLoc(), D->getLocation(), Scope->toString(), 2028 a.value().getKind(), a.value().toString(), b.value().toString()); 2029 break; 2030 } 2031 } 2032 } 2033 } 2034 // Remove locks first to allow lock upgrading/downgrading. 2035 // FIXME -- should only fully remove if the attribute refers to 'this'. 2036 bool Dtor = isa<CXXDestructorDecl>(D); 2037 for (const auto &M : ExclusiveLocksToRemove) 2038 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive); 2039 for (const auto &M : SharedLocksToRemove) 2040 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared); 2041 for (const auto &M : GenericLocksToRemove) 2042 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic); 2043 2044 // Add locks. 2045 FactEntry::SourceKind Source = 2046 !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired; 2047 for (const auto &M : ExclusiveLocksToAdd) 2048 Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive, 2049 Loc, Source)); 2050 for (const auto &M : SharedLocksToAdd) 2051 Analyzer->addLock( 2052 FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source)); 2053 2054 if (!Scp.shouldIgnore()) { 2055 // Add the managing object as a dummy mutex, mapped to the underlying mutex. 2056 auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>( 2057 Scp, Loc, FactEntry::Acquired); 2058 for (const auto &M : ExclusiveLocksToAdd) 2059 ScopedEntry->addLock(M); 2060 for (const auto &M : SharedLocksToAdd) 2061 ScopedEntry->addLock(M); 2062 for (const auto &M : ScopedReqsAndExcludes) 2063 ScopedEntry->addLock(M); 2064 for (const auto &M : ExclusiveLocksToRemove) 2065 ScopedEntry->addExclusiveUnlock(M); 2066 for (const auto &M : SharedLocksToRemove) 2067 ScopedEntry->addSharedUnlock(M); 2068 Analyzer->addLock(FSet, std::move(ScopedEntry)); 2069 } 2070 } 2071 2072 /// For unary operations which read and write a variable, we need to 2073 /// check whether we hold any required mutexes. Reads are checked in 2074 /// VisitCastExpr. 2075 void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) { 2076 switch (UO->getOpcode()) { 2077 case UO_PostDec: 2078 case UO_PostInc: 2079 case UO_PreDec: 2080 case UO_PreInc: 2081 checkAccess(UO->getSubExpr(), AK_Written); 2082 break; 2083 default: 2084 break; 2085 } 2086 } 2087 2088 /// For binary operations which assign to a variable (writes), we need to check 2089 /// whether we hold any required mutexes. 2090 /// FIXME: Deal with non-primitive types. 2091 void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) { 2092 if (!BO->isAssignmentOp()) 2093 return; 2094 2095 // adjust the context 2096 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx); 2097 2098 checkAccess(BO->getLHS(), AK_Written); 2099 } 2100 2101 /// Whenever we do an LValue to Rvalue cast, we are reading a variable and 2102 /// need to ensure we hold any required mutexes. 2103 /// FIXME: Deal with non-primitive types. 2104 void BuildLockset::VisitCastExpr(const CastExpr *CE) { 2105 if (CE->getCastKind() != CK_LValueToRValue) 2106 return; 2107 checkAccess(CE->getSubExpr(), AK_Read); 2108 } 2109 2110 void BuildLockset::examineArguments(const FunctionDecl *FD, 2111 CallExpr::const_arg_iterator ArgBegin, 2112 CallExpr::const_arg_iterator ArgEnd, 2113 bool SkipFirstParam) { 2114 // Currently we can't do anything if we don't know the function declaration. 2115 if (!FD) 2116 return; 2117 2118 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it 2119 // only turns off checking within the body of a function, but we also 2120 // use it to turn off checking in arguments to the function. This 2121 // could result in some false negatives, but the alternative is to 2122 // create yet another attribute. 2123 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>()) 2124 return; 2125 2126 const ArrayRef<ParmVarDecl *> Params = FD->parameters(); 2127 auto Param = Params.begin(); 2128 if (SkipFirstParam) 2129 ++Param; 2130 2131 // There can be default arguments, so we stop when one iterator is at end(). 2132 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd; 2133 ++Param, ++Arg) { 2134 QualType Qt = (*Param)->getType(); 2135 if (Qt->isReferenceType()) 2136 checkAccess(*Arg, AK_Read, POK_PassByRef); 2137 } 2138 } 2139 2140 void BuildLockset::VisitCallExpr(const CallExpr *Exp) { 2141 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) { 2142 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee()); 2143 // ME can be null when calling a method pointer 2144 const CXXMethodDecl *MD = CE->getMethodDecl(); 2145 2146 if (ME && MD) { 2147 if (ME->isArrow()) { 2148 // Should perhaps be AK_Written if !MD->isConst(). 2149 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read); 2150 } else { 2151 // Should perhaps be AK_Written if !MD->isConst(). 2152 checkAccess(CE->getImplicitObjectArgument(), AK_Read); 2153 } 2154 } 2155 2156 examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end()); 2157 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) { 2158 OverloadedOperatorKind OEop = OE->getOperator(); 2159 switch (OEop) { 2160 case OO_Equal: 2161 case OO_PlusEqual: 2162 case OO_MinusEqual: 2163 case OO_StarEqual: 2164 case OO_SlashEqual: 2165 case OO_PercentEqual: 2166 case OO_CaretEqual: 2167 case OO_AmpEqual: 2168 case OO_PipeEqual: 2169 case OO_LessLessEqual: 2170 case OO_GreaterGreaterEqual: 2171 checkAccess(OE->getArg(1), AK_Read); 2172 [[fallthrough]]; 2173 case OO_PlusPlus: 2174 case OO_MinusMinus: 2175 checkAccess(OE->getArg(0), AK_Written); 2176 break; 2177 case OO_Star: 2178 case OO_ArrowStar: 2179 case OO_Arrow: 2180 case OO_Subscript: 2181 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) { 2182 // Grrr. operator* can be multiplication... 2183 checkPtAccess(OE->getArg(0), AK_Read); 2184 } 2185 [[fallthrough]]; 2186 default: { 2187 // TODO: get rid of this, and rely on pass-by-ref instead. 2188 const Expr *Obj = OE->getArg(0); 2189 checkAccess(Obj, AK_Read); 2190 // Check the remaining arguments. For method operators, the first 2191 // argument is the implicit self argument, and doesn't appear in the 2192 // FunctionDecl, but for non-methods it does. 2193 const FunctionDecl *FD = OE->getDirectCallee(); 2194 examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(), 2195 /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD)); 2196 break; 2197 } 2198 } 2199 } else { 2200 examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end()); 2201 } 2202 2203 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); 2204 if (!D) 2205 return; 2206 handleCall(Exp, D); 2207 } 2208 2209 void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) { 2210 const CXXConstructorDecl *D = Exp->getConstructor(); 2211 if (D && D->isCopyConstructor()) { 2212 const Expr* Source = Exp->getArg(0); 2213 checkAccess(Source, AK_Read); 2214 } else { 2215 examineArguments(D, Exp->arg_begin(), Exp->arg_end()); 2216 } 2217 if (D && D->hasAttrs()) 2218 handleCall(Exp, D); 2219 } 2220 2221 static const Expr *UnpackConstruction(const Expr *E) { 2222 if (auto *CE = dyn_cast<CastExpr>(E)) 2223 if (CE->getCastKind() == CK_NoOp) 2224 E = CE->getSubExpr()->IgnoreParens(); 2225 if (auto *CE = dyn_cast<CastExpr>(E)) 2226 if (CE->getCastKind() == CK_ConstructorConversion || 2227 CE->getCastKind() == CK_UserDefinedConversion) 2228 E = CE->getSubExpr(); 2229 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E)) 2230 E = BTE->getSubExpr(); 2231 return E; 2232 } 2233 2234 void BuildLockset::VisitDeclStmt(const DeclStmt *S) { 2235 // adjust the context 2236 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx); 2237 2238 for (auto *D : S->getDeclGroup()) { 2239 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) { 2240 const Expr *E = VD->getInit(); 2241 if (!E) 2242 continue; 2243 E = E->IgnoreParens(); 2244 2245 // handle constructors that involve temporaries 2246 if (auto *EWC = dyn_cast<ExprWithCleanups>(E)) 2247 E = EWC->getSubExpr()->IgnoreParens(); 2248 E = UnpackConstruction(E); 2249 2250 if (auto Object = Analyzer->ConstructedObjects.find(E); 2251 Object != Analyzer->ConstructedObjects.end()) { 2252 Object->second->setClangDecl(VD); 2253 Analyzer->ConstructedObjects.erase(Object); 2254 } 2255 } 2256 } 2257 } 2258 2259 void BuildLockset::VisitMaterializeTemporaryExpr( 2260 const MaterializeTemporaryExpr *Exp) { 2261 if (const ValueDecl *ExtD = Exp->getExtendingDecl()) { 2262 if (auto Object = Analyzer->ConstructedObjects.find( 2263 UnpackConstruction(Exp->getSubExpr())); 2264 Object != Analyzer->ConstructedObjects.end()) { 2265 Object->second->setClangDecl(ExtD); 2266 Analyzer->ConstructedObjects.erase(Object); 2267 } 2268 } 2269 } 2270 2271 void BuildLockset::VisitReturnStmt(const ReturnStmt *S) { 2272 if (Analyzer->CurrentFunction == nullptr) 2273 return; 2274 const Expr *RetVal = S->getRetValue(); 2275 if (!RetVal) 2276 return; 2277 2278 // If returning by reference, check that the function requires the appropriate 2279 // capabilities. 2280 const QualType ReturnType = 2281 Analyzer->CurrentFunction->getReturnType().getCanonicalType(); 2282 if (ReturnType->isLValueReferenceType()) { 2283 Analyzer->checkAccess( 2284 FunctionExitFSet, RetVal, 2285 ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written, 2286 POK_ReturnByRef); 2287 } 2288 } 2289 2290 /// Given two facts merging on a join point, possibly warn and decide whether to 2291 /// keep or replace. 2292 /// 2293 /// \param CanModify Whether we can replace \p A by \p B. 2294 /// \return false if we should keep \p A, true if we should take \p B. 2295 bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B, 2296 bool CanModify) { 2297 if (A.kind() != B.kind()) { 2298 // For managed capabilities, the destructor should unlock in the right mode 2299 // anyway. For asserted capabilities no unlocking is needed. 2300 if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) { 2301 // The shared capability subsumes the exclusive capability, if possible. 2302 bool ShouldTakeB = B.kind() == LK_Shared; 2303 if (CanModify || !ShouldTakeB) 2304 return ShouldTakeB; 2305 } 2306 Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(), 2307 A.loc()); 2308 // Take the exclusive capability to reduce further warnings. 2309 return CanModify && B.kind() == LK_Exclusive; 2310 } else { 2311 // The non-asserted capability is the one we want to track. 2312 return CanModify && A.asserted() && !B.asserted(); 2313 } 2314 } 2315 2316 /// Compute the intersection of two locksets and issue warnings for any 2317 /// locks in the symmetric difference. 2318 /// 2319 /// This function is used at a merge point in the CFG when comparing the lockset 2320 /// of each branch being merged. For example, given the following sequence: 2321 /// A; if () then B; else C; D; we need to check that the lockset after B and C 2322 /// are the same. In the event of a difference, we use the intersection of these 2323 /// two locksets at the start of D. 2324 /// 2325 /// \param EntrySet A lockset for entry into a (possibly new) block. 2326 /// \param ExitSet The lockset on exiting a preceding block. 2327 /// \param JoinLoc The location of the join point for error reporting 2328 /// \param EntryLEK The warning if a mutex is missing from \p EntrySet. 2329 /// \param ExitLEK The warning if a mutex is missing from \p ExitSet. 2330 void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet, 2331 const FactSet &ExitSet, 2332 SourceLocation JoinLoc, 2333 LockErrorKind EntryLEK, 2334 LockErrorKind ExitLEK) { 2335 FactSet EntrySetOrig = EntrySet; 2336 2337 // Find locks in ExitSet that conflict or are not in EntrySet, and warn. 2338 for (const auto &Fact : ExitSet) { 2339 const FactEntry &ExitFact = FactMan[Fact]; 2340 2341 FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact); 2342 if (EntryIt != EntrySet.end()) { 2343 if (join(FactMan[*EntryIt], ExitFact, 2344 EntryLEK != LEK_LockedSomeLoopIterations)) 2345 *EntryIt = Fact; 2346 } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) { 2347 ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc, 2348 EntryLEK, Handler); 2349 } 2350 } 2351 2352 // Find locks in EntrySet that are not in ExitSet, and remove them. 2353 for (const auto &Fact : EntrySetOrig) { 2354 const FactEntry *EntryFact = &FactMan[Fact]; 2355 const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact); 2356 2357 if (!ExitFact) { 2358 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations || 2359 ExitLEK == LEK_NotLockedAtEndOfFunction) 2360 EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc, 2361 ExitLEK, Handler); 2362 if (ExitLEK == LEK_LockedSomePredecessors) 2363 EntrySet.removeLock(FactMan, *EntryFact); 2364 } 2365 } 2366 } 2367 2368 // Return true if block B never continues to its successors. 2369 static bool neverReturns(const CFGBlock *B) { 2370 if (B->hasNoReturnElement()) 2371 return true; 2372 if (B->empty()) 2373 return false; 2374 2375 CFGElement Last = B->back(); 2376 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) { 2377 if (isa<CXXThrowExpr>(S->getStmt())) 2378 return true; 2379 } 2380 return false; 2381 } 2382 2383 /// Check a function's CFG for thread-safety violations. 2384 /// 2385 /// We traverse the blocks in the CFG, compute the set of mutexes that are held 2386 /// at the end of each block, and issue warnings for thread safety violations. 2387 /// Each block in the CFG is traversed exactly once. 2388 void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { 2389 // TODO: this whole function needs be rewritten as a visitor for CFGWalker. 2390 // For now, we just use the walker to set things up. 2391 threadSafety::CFGWalker walker; 2392 if (!walker.init(AC)) 2393 return; 2394 2395 // AC.dumpCFG(true); 2396 // threadSafety::printSCFG(walker); 2397 2398 CFG *CFGraph = walker.getGraph(); 2399 const NamedDecl *D = walker.getDecl(); 2400 CurrentFunction = dyn_cast<FunctionDecl>(D); 2401 2402 if (D->hasAttr<NoThreadSafetyAnalysisAttr>()) 2403 return; 2404 2405 // FIXME: Do something a bit more intelligent inside constructor and 2406 // destructor code. Constructors and destructors must assume unique access 2407 // to 'this', so checks on member variable access is disabled, but we should 2408 // still enable checks on other objects. 2409 if (isa<CXXConstructorDecl>(D)) 2410 return; // Don't check inside constructors. 2411 if (isa<CXXDestructorDecl>(D)) 2412 return; // Don't check inside destructors. 2413 2414 Handler.enterFunction(CurrentFunction); 2415 2416 BlockInfo.resize(CFGraph->getNumBlockIDs(), 2417 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap)); 2418 2419 // We need to explore the CFG via a "topological" ordering. 2420 // That way, we will be guaranteed to have information about required 2421 // predecessor locksets when exploring a new block. 2422 const PostOrderCFGView *SortedGraph = walker.getSortedGraph(); 2423 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph); 2424 2425 CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()]; 2426 CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()]; 2427 2428 // Mark entry block as reachable 2429 Initial.Reachable = true; 2430 2431 // Compute SSA names for local variables 2432 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo); 2433 2434 // Fill in source locations for all CFGBlocks. 2435 findBlockLocations(CFGraph, SortedGraph, BlockInfo); 2436 2437 CapExprSet ExclusiveLocksAcquired; 2438 CapExprSet SharedLocksAcquired; 2439 CapExprSet LocksReleased; 2440 2441 // Add locks from exclusive_locks_required and shared_locks_required 2442 // to initial lockset. Also turn off checking for lock and unlock functions. 2443 // FIXME: is there a more intelligent way to check lock/unlock functions? 2444 if (!SortedGraph->empty()) { 2445 assert(*SortedGraph->begin() == &CFGraph->getEntry()); 2446 FactSet &InitialLockset = Initial.EntrySet; 2447 2448 CapExprSet ExclusiveLocksToAdd; 2449 CapExprSet SharedLocksToAdd; 2450 2451 SourceLocation Loc = D->getLocation(); 2452 for (const auto *Attr : D->attrs()) { 2453 Loc = Attr->getLocation(); 2454 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) { 2455 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 2456 nullptr, D); 2457 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) { 2458 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation. 2459 // We must ignore such methods. 2460 if (A->args_size() == 0) 2461 return; 2462 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 2463 nullptr, D); 2464 getMutexIDs(LocksReleased, A, nullptr, D); 2465 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) { 2466 if (A->args_size() == 0) 2467 return; 2468 getMutexIDs(A->isShared() ? SharedLocksAcquired 2469 : ExclusiveLocksAcquired, 2470 A, nullptr, D); 2471 } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) { 2472 // Don't try to check trylock functions for now. 2473 return; 2474 } else if (isa<SharedTrylockFunctionAttr>(Attr)) { 2475 // Don't try to check trylock functions for now. 2476 return; 2477 } else if (isa<TryAcquireCapabilityAttr>(Attr)) { 2478 // Don't try to check trylock functions for now. 2479 return; 2480 } 2481 } 2482 ArrayRef<ParmVarDecl *> Params; 2483 if (CurrentFunction) 2484 Params = CurrentFunction->getCanonicalDecl()->parameters(); 2485 else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D)) 2486 Params = CurrentMethod->getCanonicalDecl()->parameters(); 2487 else 2488 llvm_unreachable("Unknown function kind"); 2489 for (const ParmVarDecl *Param : Params) { 2490 CapExprSet UnderlyingLocks; 2491 for (const auto *Attr : Param->attrs()) { 2492 Loc = Attr->getLocation(); 2493 if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) { 2494 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 2495 nullptr, Param); 2496 getMutexIDs(LocksReleased, A, nullptr, Param); 2497 getMutexIDs(UnderlyingLocks, A, nullptr, Param); 2498 } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) { 2499 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 2500 nullptr, Param); 2501 getMutexIDs(UnderlyingLocks, A, nullptr, Param); 2502 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) { 2503 getMutexIDs(A->isShared() ? SharedLocksAcquired 2504 : ExclusiveLocksAcquired, 2505 A, nullptr, Param); 2506 getMutexIDs(UnderlyingLocks, A, nullptr, Param); 2507 } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) { 2508 getMutexIDs(UnderlyingLocks, A, nullptr, Param); 2509 } 2510 } 2511 if (UnderlyingLocks.empty()) 2512 continue; 2513 CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(), false); 2514 auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>( 2515 Cp, Param->getLocation(), FactEntry::Declared); 2516 for (const CapabilityExpr &M : UnderlyingLocks) 2517 ScopedEntry->addLock(M); 2518 addLock(InitialLockset, std::move(ScopedEntry), true); 2519 } 2520 2521 // FIXME -- Loc can be wrong here. 2522 for (const auto &Mu : ExclusiveLocksToAdd) { 2523 auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc, 2524 FactEntry::Declared); 2525 addLock(InitialLockset, std::move(Entry), true); 2526 } 2527 for (const auto &Mu : SharedLocksToAdd) { 2528 auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc, 2529 FactEntry::Declared); 2530 addLock(InitialLockset, std::move(Entry), true); 2531 } 2532 } 2533 2534 // Compute the expected exit set. 2535 // By default, we expect all locks held on entry to be held on exit. 2536 FactSet ExpectedFunctionExitSet = Initial.EntrySet; 2537 2538 // Adjust the expected exit set by adding or removing locks, as declared 2539 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then 2540 // issue the appropriate warning. 2541 // FIXME: the location here is not quite right. 2542 for (const auto &Lock : ExclusiveLocksAcquired) 2543 ExpectedFunctionExitSet.addLock( 2544 FactMan, std::make_unique<LockableFactEntry>(Lock, LK_Exclusive, 2545 D->getLocation())); 2546 for (const auto &Lock : SharedLocksAcquired) 2547 ExpectedFunctionExitSet.addLock( 2548 FactMan, 2549 std::make_unique<LockableFactEntry>(Lock, LK_Shared, D->getLocation())); 2550 for (const auto &Lock : LocksReleased) 2551 ExpectedFunctionExitSet.removeLock(FactMan, Lock); 2552 2553 for (const auto *CurrBlock : *SortedGraph) { 2554 unsigned CurrBlockID = CurrBlock->getBlockID(); 2555 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID]; 2556 2557 // Use the default initial lockset in case there are no predecessors. 2558 VisitedBlocks.insert(CurrBlock); 2559 2560 // Iterate through the predecessor blocks and warn if the lockset for all 2561 // predecessors is not the same. We take the entry lockset of the current 2562 // block to be the intersection of all previous locksets. 2563 // FIXME: By keeping the intersection, we may output more errors in future 2564 // for a lock which is not in the intersection, but was in the union. We 2565 // may want to also keep the union in future. As an example, let's say 2566 // the intersection contains Mutex L, and the union contains L and M. 2567 // Later we unlock M. At this point, we would output an error because we 2568 // never locked M; although the real error is probably that we forgot to 2569 // lock M on all code paths. Conversely, let's say that later we lock M. 2570 // In this case, we should compare against the intersection instead of the 2571 // union because the real error is probably that we forgot to unlock M on 2572 // all code paths. 2573 bool LocksetInitialized = false; 2574 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), 2575 PE = CurrBlock->pred_end(); PI != PE; ++PI) { 2576 // if *PI -> CurrBlock is a back edge 2577 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) 2578 continue; 2579 2580 unsigned PrevBlockID = (*PI)->getBlockID(); 2581 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; 2582 2583 // Ignore edges from blocks that can't return. 2584 if (neverReturns(*PI) || !PrevBlockInfo->Reachable) 2585 continue; 2586 2587 // Okay, we can reach this block from the entry. 2588 CurrBlockInfo->Reachable = true; 2589 2590 FactSet PrevLockset; 2591 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock); 2592 2593 if (!LocksetInitialized) { 2594 CurrBlockInfo->EntrySet = PrevLockset; 2595 LocksetInitialized = true; 2596 } else { 2597 // Surprisingly 'continue' doesn't always produce back edges, because 2598 // the CFG has empty "transition" blocks where they meet with the end 2599 // of the regular loop body. We still want to diagnose them as loop. 2600 intersectAndWarn( 2601 CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc, 2602 isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt()) 2603 ? LEK_LockedSomeLoopIterations 2604 : LEK_LockedSomePredecessors); 2605 } 2606 } 2607 2608 // Skip rest of block if it's not reachable. 2609 if (!CurrBlockInfo->Reachable) 2610 continue; 2611 2612 BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet); 2613 2614 // Visit all the statements in the basic block. 2615 for (const auto &BI : *CurrBlock) { 2616 switch (BI.getKind()) { 2617 case CFGElement::Statement: { 2618 CFGStmt CS = BI.castAs<CFGStmt>(); 2619 LocksetBuilder.Visit(CS.getStmt()); 2620 break; 2621 } 2622 // Ignore BaseDtor and MemberDtor for now. 2623 case CFGElement::AutomaticObjectDtor: { 2624 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>(); 2625 const auto *DD = AD.getDestructorDecl(AC.getASTContext()); 2626 if (!DD->hasAttrs()) 2627 break; 2628 2629 LocksetBuilder.handleCall(nullptr, DD, 2630 SxBuilder.createVariable(AD.getVarDecl()), 2631 AD.getTriggerStmt()->getEndLoc()); 2632 break; 2633 } 2634 2635 case CFGElement::CleanupFunction: { 2636 const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>(); 2637 LocksetBuilder.handleCall(/*Exp=*/nullptr, CF.getFunctionDecl(), 2638 SxBuilder.createVariable(CF.getVarDecl()), 2639 CF.getVarDecl()->getLocation()); 2640 break; 2641 } 2642 2643 case CFGElement::TemporaryDtor: { 2644 auto TD = BI.castAs<CFGTemporaryDtor>(); 2645 2646 // Clean up constructed object even if there are no attributes to 2647 // keep the number of objects in limbo as small as possible. 2648 if (auto Object = ConstructedObjects.find( 2649 TD.getBindTemporaryExpr()->getSubExpr()); 2650 Object != ConstructedObjects.end()) { 2651 const auto *DD = TD.getDestructorDecl(AC.getASTContext()); 2652 if (DD->hasAttrs()) 2653 // TODO: the location here isn't quite correct. 2654 LocksetBuilder.handleCall(nullptr, DD, Object->second, 2655 TD.getBindTemporaryExpr()->getEndLoc()); 2656 ConstructedObjects.erase(Object); 2657 } 2658 break; 2659 } 2660 default: 2661 break; 2662 } 2663 } 2664 CurrBlockInfo->ExitSet = LocksetBuilder.FSet; 2665 2666 // For every back edge from CurrBlock (the end of the loop) to another block 2667 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to 2668 // the one held at the beginning of FirstLoopBlock. We can look up the 2669 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map. 2670 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), 2671 SE = CurrBlock->succ_end(); SI != SE; ++SI) { 2672 // if CurrBlock -> *SI is *not* a back edge 2673 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI)) 2674 continue; 2675 2676 CFGBlock *FirstLoopBlock = *SI; 2677 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()]; 2678 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID]; 2679 intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc, 2680 LEK_LockedSomeLoopIterations); 2681 } 2682 } 2683 2684 // Skip the final check if the exit block is unreachable. 2685 if (!Final.Reachable) 2686 return; 2687 2688 // FIXME: Should we call this function for all blocks which exit the function? 2689 intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc, 2690 LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction); 2691 2692 Handler.leaveFunction(CurrentFunction); 2693 } 2694 2695 /// Check a function's CFG for thread-safety violations. 2696 /// 2697 /// We traverse the blocks in the CFG, compute the set of mutexes that are held 2698 /// at the end of each block, and issue warnings for thread safety violations. 2699 /// Each block in the CFG is traversed exactly once. 2700 void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC, 2701 ThreadSafetyHandler &Handler, 2702 BeforeSet **BSet) { 2703 if (!*BSet) 2704 *BSet = new BeforeSet; 2705 ThreadSafetyAnalyzer Analyzer(Handler, *BSet); 2706 Analyzer.runAnalysis(AC); 2707 } 2708 2709 void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; } 2710 2711 /// Helper function that returns a LockKind required for the given level 2712 /// of access. 2713 LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) { 2714 switch (AK) { 2715 case AK_Read : 2716 return LK_Shared; 2717 case AK_Written : 2718 return LK_Exclusive; 2719 } 2720 llvm_unreachable("Unknown AccessKind"); 2721 } 2722