//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_persistent_allocator.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

static PersistentAllocator<uptr> traceAllocator;

struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;
  u32 link;
  u32 tag;

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;
  // Two traces are considered equal if their 64-bit hashes match; the trace
  // contents are not re-compared.
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr allocated();
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
// Keep rarely accessed stack traces out of frequently accessed nodes to
// improve caching efficiency.
static TwoLevelMap<uptr *, StackDepot::kNodesSize1, StackDepot::kNodesSize2>
    tracePtrs;
// Keep mutable data out of frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;

int StackDepotHandle::use_count() const {
  return atomic_load_relaxed(&useCounts[id_]);
}

void StackDepotHandle::inc_use_count_unsafe() {
  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
}

uptr StackDepotNode::allocated() {
  return traceAllocator.allocated() + tracePtrs.MemoryUsage() +
         useCounts.MemoryUsage();
}

void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
  tag = args.tag;
  stack_hash = hash;
  // The trace is stored with its length in the first element.
  uptr *stack_trace = traceAllocator.alloc(args.size + 1);
  *stack_trace = args.size;
  internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
  tracePtrs[id] = stack_trace;
}

StackDepotNode::args_type StackDepotNode::load(u32 id) const {
  const uptr *stack_trace = tracePtrs[id];
  if (!stack_trace)
    return {};
  return args_type(stack_trace + 1, *stack_trace, tag);
}

StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }

u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return StackDepotNode::get_handle(theDepot.Put(stack));
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}

void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}

StackDepotHandle StackDepotNode::get_handle(u32 id) {
  return StackDepotHandle(&theDepot.nodes[id], id);
}

void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  tracePtrs.TestOnlyUnmap();
  traceAllocator.TestOnlyUnmap();
}

} // namespace __sanitizer
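
// A minimal usage sketch (illustrative only, not part of the runtime): a
// caller deduplicates a captured trace through the depot and retrieves it
// later by the returned 32-bit id. The program counters below are
// hypothetical values.
//
//   using namespace __sanitizer;
//   uptr pcs[] = {0x401000, 0x401a20, 0x402f00};  // hypothetical PCs
//   StackTrace stack(pcs, 3);
//   u32 id = StackDepotPut(stack);         // stores the trace once
//   CHECK_EQ(id, StackDepotPut(stack));    // same trace -> same id
//   StackTrace copy = StackDepotGet(id);   // copy.trace points into depot
//   CHECK_EQ(copy.size, 3);                // storage, valid until unmap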