//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;
  u32 link;

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr allocated();
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};

static StackStore stackStore;

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
// Keep rarely accessed stack traces out of frequently accessed nodes to
// improve caching efficiency.
static TwoLevelMap<StackStore::Id, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    storeIds;
// Keep mutable data out of frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;

int StackDepotHandle::use_count() const {
  return atomic_load_relaxed(&useCounts[id_]);
}

void StackDepotHandle::inc_use_count_unsafe() {
  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
}

uptr StackDepotNode::allocated() {
  return stackStore.Allocated() + storeIds.MemoryUsage() +
         useCounts.MemoryUsage();
}

void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
  stack_hash = hash;
  storeIds[id] = stackStore.Store(args);
}

StackDepotNode::args_type StackDepotNode::load(u32 id) const {
  StackStore::Id store_id = storeIds[id];
  if (!store_id)
    return {};
  return stackStore.Load(store_id);
}

StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }

u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return StackDepotNode::get_handle(theDepot.Put(stack));
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}

void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}

StackDepotHandle StackDepotNode::get_handle(u32 id) {
  return StackDepotHandle(&theDepot.nodes[id], id);
}

void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  storeIds.TestOnlyUnmap();
  stackStore.TestOnlyUnmap();
}

}  // namespace __sanitizer
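
// A minimal usage sketch (illustrative only, not compiled into the runtime):
// a caller deduplicates a captured stack trace through the depot and later
// recovers it from the compact 32-bit id. The hard-coded `frames` array is a
// hypothetical stand-in for PCs collected by the caller's unwinder; only
// StackDepotPut/StackDepotGet/StackDepotPut_WithHandle above are part of this
// file's interface.
#if 0
namespace __sanitizer {
static void StackDepotUsageExample() {
  uptr frames[3] = {0x401000, 0x401100, 0x401200};  // hypothetical PCs
  StackTrace trace(frames, 3);
  u32 id = StackDepotPut(trace);          // equal traces map to the same id
  StackTrace loaded = StackDepotGet(id);  // recover the frames from the id
  CHECK_EQ(loaded.size, trace.size);
  // StackDepotPut_WithHandle additionally returns a StackDepotHandle, whose
  // use_count()/inc_use_count_unsafe() are backed by the useCounts map above.
}
}  // namespace __sanitizer
#endif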